You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
This benchmark for InsertRequest shows that unmarshal latency is 3x faster than the original proto unmarshal method, while marshal latency is 70% slower than the original proto marshal method.
Another key advantage is reduced CPU usage: unmarshalling saves about 1x the CPU time and marshalling saves about 25%. Using the memory-pool feature of vtprotobuf would reduce CPU usage further.
benchmark code snippet
// getInsertReq builds the benchmark fixture: an InsertRequest carrying
// 1000 rows of a single 768-dimensional float-vector field plus the
// matching hash keys.
func getInsertReq() *protos.InsertRequest {
	const (
		numRows = 1000
		dim     = 768
	)
	req := &protos.InsertRequest{
		DbName:         "db1",
		CollectionName: "col1",
		NumRows:        uint32(numRows),
	}
	req.FieldsData = []*protos.FieldData{NewFloatVectorFieldData("f1", numRows, dim)}
	req.HashKeys = GenerateHashKeys(numRows)
	return req
}
// BenchmarkInsertRequest compares the standard protobuf codec against the
// vtprotobuf-generated fast paths for a representative InsertRequest.
//
// Four sub-benchmarks are run:
//   - Marshal / MarshalVT: serialize the same request on every iteration.
//   - Unmarshal / UnmarshalVT: deserialize a pre-encoded buffer on every
//     iteration.
//
// Each sub-benchmark reports the average payload size ("bytes") so the
// latency numbers can be related to the message size, and reports
// allocations since allocation behavior is a key difference between the
// two codecs.
func BenchmarkInsertRequest(b *testing.B) {
	req := getInsertReq()

	// Encode once up front so the Unmarshal sub-benchmarks operate on a
	// stable pre-encoded buffer; the Marshal sub-benchmarks use their own
	// locals to avoid shadowing it.
	bs, err := proto.Marshal(req)
	if err != nil {
		b.Fatal(err)
	}

	b.Run("Marshal", func(b *testing.B) {
		b.ReportAllocs()
		total := 0
		for i := 0; i < b.N; i++ {
			out, err := proto.Marshal(req)
			// Check the error before consuming the output: on failure
			// the returned slice is meaningless.
			if err != nil {
				b.Fatal(err)
			}
			total += len(out)
		}
		b.ReportMetric(float64(total)/float64(b.N), "bytes")
	})
	b.Run("MarshalVT", func(b *testing.B) {
		b.ReportAllocs()
		total := 0
		for i := 0; i < b.N; i++ {
			out, err := req.MarshalVT()
			if err != nil {
				b.Fatal(err)
			}
			total += len(out)
		}
		b.ReportMetric(float64(total)/float64(b.N), "bytes")
	})
	b.Run("Unmarshal", func(b *testing.B) {
		b.ReportAllocs()
		total := 0
		for i := 0; i < b.N; i++ {
			// Fresh zero-value message each iteration so decoding cost is
			// not amortized across iterations.
			var l protos.InsertRequest
			if err := proto.Unmarshal(bs, &l); err != nil {
				b.Fatal(err)
			}
			total += len(bs)
		}
		b.ReportMetric(float64(total)/float64(b.N), "bytes")
	})
	b.Run("UnmarshalVT", func(b *testing.B) {
		b.ReportAllocs()
		total := 0
		for i := 0; i < b.N; i++ {
			var l protos.InsertRequest
			if err := l.UnmarshalVT(bs); err != nil {
				b.Fatal(err)
			}
			total += len(bs)
		}
		b.ReportMetric(float64(total)/float64(b.N), "bytes")
	})
}
Why is this needed?
No response
Anything else?
No response
The text was updated successfully, but these errors were encountered:
Is there an existing issue for this?
What would you like to be added?
See more about https://github.com/planetscale/vtprotobuf
This benchmark for InsertRequest shows that unmarshal latency is 3x faster than the original proto unmarshal method, while marshal latency is 70% slower than the original proto marshal method.
Another key advantage is reduced CPU usage: unmarshalling saves about 1x the CPU time and marshalling saves about 25%. Using the memory-pool feature of vtprotobuf would reduce CPU usage further.
benchmark code snippet
Why is this needed?
No response
Anything else?
No response
The text was updated successfully, but these errors were encountered: