From 82c5cf2fa2c3781ea316bc9bd6ed5c78365f7b15 Mon Sep 17 00:00:00 2001
From: Buqian Zheng
Date: Sat, 12 Oct 2024 17:19:20 +0800
Subject: [PATCH] feat: add bulk insert support for Functions (#36715)

issue: https://github.com/milvus-io/milvus/issues/35853 and
https://github.com/milvus-io/milvus/issues/35856

Signed-off-by: Buqian Zheng
---
 internal/datanode/importv2/hash.go            |  2 +-
 internal/datanode/importv2/task_import.go     | 17 ++++++-
 internal/datanode/importv2/task_l0_import.go  |  2 +-
 internal/datanode/importv2/util.go            | 48 +++++++++++++++++++
 internal/storage/insert_data.go               | 16 ++++++-
 internal/util/importutilv2/binlog/util.go     |  1 +
 internal/util/importutilv2/csv/reader_test.go | 18 +++++++
 internal/util/importutilv2/csv/row_parser.go  | 32 ++++++-------
 .../util/importutilv2/json/reader_test.go     | 19 ++++++++
 internal/util/importutilv2/json/row_parser.go | 17 +++----
 internal/util/importutilv2/numpy/reader.go    | 16 +++++--
 .../util/importutilv2/numpy/reader_test.go    | 38 +++++++++++++++
 .../util/importutilv2/parquet/reader_test.go  | 18 +++++++
 internal/util/importutilv2/parquet/util.go    |  8 ++--
 internal/util/testutil/test_util.go           | 18 ++++---
 pkg/util/testutils/gen_data.go                | 16 ++++++-
 16 files changed, 235 insertions(+), 51 deletions(-)

diff --git a/internal/datanode/importv2/hash.go b/internal/datanode/importv2/hash.go
index b7070527d8104..8ab0834b7bcc9 100644
--- a/internal/datanode/importv2/hash.go
+++ b/internal/datanode/importv2/hash.go
@@ -33,7 +33,7 @@ func newHashedData(schema *schemapb.CollectionSchema, channelNum, partitionNum i
 	for i := 0; i < channelNum; i++ {
 		res[i] = make([]*storage.InsertData, partitionNum)
 		for j := 0; j < partitionNum; j++ {
-			res[i][j], err = storage.NewInsertData(schema)
+			res[i][j], err = storage.NewInsertDataWithFunctionOutputField(schema)
 			if err != nil {
 				return nil, err
 			}
diff --git a/internal/datanode/importv2/task_import.go b/internal/datanode/importv2/task_import.go
index 34a255a6a36c3..a365ec1a0c5c1 100644
--- a/internal/datanode/importv2/task_import.go
+++ b/internal/datanode/importv2/task_import.go
@@ -195,6 +195,12 @@ func (t *ImportTask) importFile(reader importutilv2.Reader) error {
 		if err != nil {
 			return err
 		}
+		if !importutilv2.IsBackup(t.req.GetOptions()) {
+			err = RunEmbeddingFunction(t, data)
+			if err != nil {
+				return err
+			}
+		}
 		hashedData, err := HashData(t, data)
 		if err != nil {
 			return err
@@ -236,8 +242,17 @@ func (t *ImportTask) sync(hashedData HashedData) ([]*conc.Future[struct{}], []sy
 			if err != nil {
 				return nil, nil, err
 			}
+			bm25Stats := make(map[int64]*storage.BM25Stats)
+			for _, fn := range t.req.GetSchema().GetFunctions() {
+				if fn.GetType() == schemapb.FunctionType_BM25 {
+					// a BM25 function is guaranteed to have exactly one output field
+					outputSparseFieldId := fn.GetOutputFieldIds()[0]
+					bm25Stats[outputSparseFieldId] = storage.NewBM25Stats()
+					bm25Stats[outputSparseFieldId].AppendFieldData(data.Data[outputSparseFieldId].(*storage.SparseFloatVectorFieldData))
+				}
+			}
 			syncTask, err := NewSyncTask(t.ctx, t.allocator, t.metaCaches, t.req.GetTs(),
-				segmentID, partitionID, t.GetCollectionID(), channel, data, nil)
+				segmentID, partitionID, t.GetCollectionID(), channel, data, nil, bm25Stats)
 			if err != nil {
 				return nil, nil, err
 			}
diff --git a/internal/datanode/importv2/task_l0_import.go b/internal/datanode/importv2/task_l0_import.go
index b410782a24f87..ac87d056a7557 100644
--- a/internal/datanode/importv2/task_l0_import.go
+++ b/internal/datanode/importv2/task_l0_import.go
@@ -227,7 +227,7 @@ func (t *L0ImportTask) syncDelete(delData []*storage.DeleteData) ([]*conc.Future
 		return nil, nil, err
 	}
 	syncTask, err := NewSyncTask(t.ctx, t.allocator, t.metaCaches, t.req.GetTs(),
-		segmentID, partitionID, t.GetCollectionID(), channel, nil, data)
+		segmentID, partitionID, t.GetCollectionID(), channel, nil, data, nil)
 	if err != nil {
 		return nil, nil, err
 	}
diff --git a/internal/datanode/importv2/util.go b/internal/datanode/importv2/util.go
index c725bb1b1abe5..96b2b7bdf8d00 100644
--- a/internal/datanode/importv2/util.go
+++ b/internal/datanode/importv2/util.go
@@ -34,6 +34,7 @@ import (
 	"github.com/milvus-io/milvus/internal/flushcommon/syncmgr"
 	"github.com/milvus-io/milvus/internal/proto/datapb"
 	"github.com/milvus-io/milvus/internal/storage"
+	"github.com/milvus-io/milvus/internal/util/function"
 	"github.com/milvus-io/milvus/pkg/common"
 	"github.com/milvus-io/milvus/pkg/log"
 	"github.com/milvus-io/milvus/pkg/metrics"
@@ -52,6 +53,7 @@ func NewSyncTask(ctx context.Context,
 	segmentID, partitionID, collectionID int64, vchannel string,
 	insertData *storage.InsertData,
 	deleteData *storage.DeleteData,
+	bm25Stats map[int64]*storage.BM25Stats,
 ) (syncmgr.Task, error) {
 	metaCache := metaCaches[vchannel]
 	if _, ok := metaCache.GetSegmentByID(segmentID); !ok {
@@ -94,6 +96,9 @@ func NewSyncTask(ctx context.Context,
 		WithLevel(segmentLevel).
 		WithDataSource(metrics.BulkinsertDataSourceLabel).
 		WithBatchSize(int64(insertData.GetRowNum()))
+	if bm25Stats != nil {
+		syncPack.WithBM25Stats(bm25Stats)
+	}
 	return serializer.EncodeBuffer(ctx, syncPack)
 }
@@ -202,6 +207,49 @@ func AppendSystemFieldsData(task *ImportTask, data *storage.InsertData) error {
 	return nil
 }
 
+func RunEmbeddingFunction(task *ImportTask, data *storage.InsertData) error {
+	fns := task.GetSchema().GetFunctions()
+	for _, fn := range fns {
+		runner, err := function.NewFunctionRunner(task.GetSchema(), fn)
+		if err != nil {
+			return err
+		}
+		inputDatas := make([]any, 0, len(fn.InputFieldIds))
+		for _, inputFieldID := range fn.InputFieldIds {
+			inputDatas = append(inputDatas, data.Data[inputFieldID].GetDataRows())
+		}
+		outputFieldData, err := runner.BatchRun(inputDatas...)
+		if err != nil {
+			return err
+		}
+		for i, outputFieldID := range fn.OutputFieldIds {
+			outputField := typeutil.GetField(task.GetSchema(), outputFieldID)
+			// TODO: only vector output fields are supported; scalar output fields for functions are not supported yet
+			switch outputField.GetDataType() {
+			case schemapb.DataType_FloatVector:
+				data.Data[outputFieldID] = outputFieldData[i].(*storage.FloatVectorFieldData)
+			case schemapb.DataType_BFloat16Vector:
+				data.Data[outputFieldID] = outputFieldData[i].(*storage.BFloat16VectorFieldData)
+			case schemapb.DataType_Float16Vector:
+				data.Data[outputFieldID] = outputFieldData[i].(*storage.Float16VectorFieldData)
+			case schemapb.DataType_BinaryVector:
+				data.Data[outputFieldID] = outputFieldData[i].(*storage.BinaryVectorFieldData)
+			case schemapb.DataType_SparseFloatVector:
+				sparseArray := outputFieldData[i].(*schemapb.SparseFloatArray)
+				data.Data[outputFieldID] = &storage.SparseFloatVectorFieldData{
+					SparseFloatArray: schemapb.SparseFloatArray{
+						Dim:      sparseArray.GetDim(),
+						Contents: sparseArray.GetContents(),
+					},
+				}
+			default:
+				return fmt.Errorf("unsupported output data type for embedding function: %s", outputField.GetDataType().String())
+			}
+		}
+	}
+	return nil
+}
+
 func GetInsertDataRowCount(data *storage.InsertData, schema *schemapb.CollectionSchema) int {
 	fields := lo.KeyBy(schema.GetFields(), func(field *schemapb.FieldSchema) int64 {
 		return field.GetFieldID()
diff --git a/internal/storage/insert_data.go b/internal/storage/insert_data.go
index 8f41c12fec852..6143d8289e50c 100644
--- a/internal/storage/insert_data.go
+++ b/internal/storage/insert_data.go
@@ -50,10 +50,14 @@ type InsertData struct {
 }
 
 func NewInsertData(schema *schemapb.CollectionSchema) (*InsertData, error) {
-	return NewInsertDataWithCap(schema, 0)
+	return NewInsertDataWithCap(schema, 0, false)
 }
 
-func NewInsertDataWithCap(schema *schemapb.CollectionSchema, cap int) (*InsertData, error) {
+func NewInsertDataWithFunctionOutputField(schema *schemapb.CollectionSchema) (*InsertData, error) {
+	return NewInsertDataWithCap(schema, 0, true)
+}
+
+func NewInsertDataWithCap(schema *schemapb.CollectionSchema, cap int, withFunctionOutput bool) (*InsertData, error) {
 	if schema == nil {
 		return nil, merr.WrapErrParameterMissing("collection schema")
 	}
@@ -69,6 +73,14 @@ func NewInsertDataWithCap(schema *schemapb.CollectionSchema, cap int) (*InsertDa
 		if field.IsPartitionKey && field.GetNullable() {
 			return nil, merr.WrapErrParameterInvalidMsg("partition key field not support nullable")
 		}
+		if field.IsFunctionOutput {
+			if field.IsPrimaryKey || field.IsPartitionKey {
+				return nil, merr.WrapErrParameterInvalidMsg("function output field should not be primary key or partition key")
+			}
+			if !withFunctionOutput {
+				continue
+			}
+		}
 		fieldData, err := NewFieldData(field.DataType, field, cap)
 		if err != nil {
 			return nil, err
diff --git a/internal/util/importutilv2/binlog/util.go b/internal/util/importutilv2/binlog/util.go
index 645d2a1dcdd6e..8f316e960992e 100644
--- a/internal/util/importutilv2/binlog/util.go
+++ b/internal/util/importutilv2/binlog/util.go
@@ -114,5 +114,6 @@ func verify(schema *schemapb.CollectionSchema, insertLogs map[int64][]string) er
 				fieldID, len(logs), common.RowIDField, len(insertLogs[common.RowIDField])))
 		}
 	}
+	// For function output fields, we do not re-run the function when restoring from a backup.
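+	// Their binlogs were already persisted when the source segments were flushed,
+	// so the backup's insert logs for these fields are imported as-is.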
 	return nil
 }
diff --git a/internal/util/importutilv2/csv/reader_test.go b/internal/util/importutilv2/csv/reader_test.go
index 67e29256e0b1c..1f776773ab9e5 100644
--- a/internal/util/importutilv2/csv/reader_test.go
+++ b/internal/util/importutilv2/csv/reader_test.go
@@ -77,6 +77,24 @@ func (suite *ReaderSuite) run(dataType schemapb.DataType, elemType schemapb.Data
 			},
 		},
 	}
+	if dataType == schemapb.DataType_VarChar {
+		// Add a BM25 function if data type is VarChar
+		schema.Fields = append(schema.Fields, &schemapb.FieldSchema{
+			FieldID:          103,
+			Name:             "sparse",
+			DataType:         schemapb.DataType_SparseFloatVector,
+			IsFunctionOutput: true,
+		})
+		schema.Functions = append(schema.Functions, &schemapb.FunctionSchema{
+			Id:               1000,
+			Name:             "bm25",
+			Type:             schemapb.FunctionType_BM25,
+			InputFieldIds:    []int64{102},
+			InputFieldNames:  []string{dataType.String()},
+			OutputFieldIds:   []int64{103},
+			OutputFieldNames: []string{"sparse"},
+		})
+	}
 
 	// config
 	// csv separator
diff --git a/internal/util/importutilv2/csv/row_parser.go b/internal/util/importutilv2/csv/row_parser.go
index c87b0399f5b2e..6605cf4e2e769 100644
--- a/internal/util/importutilv2/csv/row_parser.go
+++ b/internal/util/importutilv2/csv/row_parser.go
@@ -26,10 +26,20 @@ type rowParser struct {
 }
 
 func NewRowParser(schema *schemapb.CollectionSchema, header []string, nullkey string) (RowParser, error) {
-	name2Field := lo.KeyBy(schema.GetFields(),
-		func(field *schemapb.FieldSchema) string {
-			return field.GetName()
-		})
+	pkField, err := typeutil.GetPrimaryFieldSchema(schema)
+	if err != nil {
+		return nil, err
+	}
+	dynamicField := typeutil.GetDynamicField(schema)
+
+	name2Field := lo.SliceToMap(
+		lo.Filter(schema.GetFields(), func(field *schemapb.FieldSchema, _ int) bool {
+			return !field.GetIsFunctionOutput() && !typeutil.IsAutoPKField(field) && field.GetName() != dynamicField.GetName()
+		}),
+		func(field *schemapb.FieldSchema) (string, *schemapb.FieldSchema) {
+			return field.GetName(), field
+		},
+	)
 
 	name2Dim := make(map[string]int)
 	for name, field := range name2Field {
@@ -42,20 +52,6 @@ func NewRowParser(schema *schemapb.CollectionSchema, header []string, nullkey st
 		}
 	}
 
-	pkField, err := typeutil.GetPrimaryFieldSchema(schema)
-	if err != nil {
-		return nil, err
-	}
-
-	if pkField.GetAutoID() {
-		delete(name2Field, pkField.GetName())
-	}
-
-	dynamicField := typeutil.GetDynamicField(schema)
-	if dynamicField != nil {
-		delete(name2Field, dynamicField.GetName())
-	}
-
 	// check if csv header provides the primary key while it should be auto-generated
 	if pkField.GetAutoID() && lo.Contains(header, pkField.GetName()) {
 		return nil, merr.WrapErrImportFailed(
diff --git a/internal/util/importutilv2/json/reader_test.go b/internal/util/importutilv2/json/reader_test.go
index 0077686252663..db2257ab65184 100644
--- a/internal/util/importutilv2/json/reader_test.go
+++ b/internal/util/importutilv2/json/reader_test.go
@@ -101,6 +101,25 @@ func (suite *ReaderSuite) run(dataType schemapb.DataType, elemType schemapb.Data
 		},
 	}
 
+	if dataType == schemapb.DataType_VarChar {
+		// Add a BM25 function if data type is VarChar
+		schema.Fields = append(schema.Fields, &schemapb.FieldSchema{
+			FieldID:          103,
+			Name:             "sparse",
+			DataType:         schemapb.DataType_SparseFloatVector,
+			IsFunctionOutput: true,
+		})
+		schema.Functions = append(schema.Functions, &schemapb.FunctionSchema{
+			Id:               1000,
+			Name:             "bm25",
+			Type:             schemapb.FunctionType_BM25,
+			InputFieldIds:    []int64{102},
+			InputFieldNames:  []string{dataType.String()},
+			OutputFieldIds:   []int64{103},
+			OutputFieldNames: []string{"sparse"},
+		})
+	}
+
 	insertData, err := testutil.CreateInsertData(schema, suite.numRows)
 	suite.NoError(err)
diff --git a/internal/util/importutilv2/json/row_parser.go b/internal/util/importutilv2/json/row_parser.go
index ce5ccb20ca11b..cb1e5b29f9410 100644
--- a/internal/util/importutilv2/json/row_parser.go
+++ b/internal/util/importutilv2/json/row_parser.go
@@ -62,20 +62,17 @@ func NewRowParser(schema *schemapb.CollectionSchema) (RowParser, error) {
 	if err != nil {
 		return nil, err
 	}
+	dynamicField := typeutil.GetDynamicField(schema)
 
-	name2FieldID := lo.SliceToMap(schema.GetFields(),
+	name2FieldID := lo.SliceToMap(
+		lo.Filter(schema.GetFields(), func(field *schemapb.FieldSchema, _ int) bool {
+			return !field.GetIsFunctionOutput() && !typeutil.IsAutoPKField(field) && field.GetName() != dynamicField.GetName()
+		}),
 		func(field *schemapb.FieldSchema) (string, int64) {
 			return field.GetName(), field.GetFieldID()
-		})
-
-	if pkField.GetAutoID() {
-		delete(name2FieldID, pkField.GetName())
-	}
+		},
+	)
 
-	dynamicField := typeutil.GetDynamicField(schema)
-	if dynamicField != nil {
-		delete(name2FieldID, dynamicField.GetName())
-	}
 	return &rowParser{
 		id2Dim:   id2Dim,
 		id2Field: id2Field,
diff --git a/internal/util/importutilv2/numpy/reader.go b/internal/util/importutilv2/numpy/reader.go
index 96a039895080a..9e3dda144df2f 100644
--- a/internal/util/importutilv2/numpy/reader.go
+++ b/internal/util/importutilv2/numpy/reader.go
@@ -132,24 +132,32 @@ func CreateReaders(ctx context.Context, cm storage.ChunkManager, schema *schemap
 		return name, path
 	})
 	for _, field := range schema.GetFields() {
+		path, hasPath := nameToPath[field.GetName()]
 		if field.GetIsPrimaryKey() && field.GetAutoID() {
-			if _, ok := nameToPath[field.GetName()]; ok {
+			if hasPath {
 				return nil, merr.WrapErrImportFailed(
 					fmt.Sprintf("the primary key '%s' is auto-generated, no need to provide", field.GetName()))
 			}
 			continue
 		}
-		if _, ok := nameToPath[field.GetName()]; !ok {
+		if field.GetIsFunctionOutput() {
+			if hasPath {
+				return nil, merr.WrapErrImportFailed(
+					fmt.Sprintf("field %s is Function output, should not be provided. Provided files: %v", field.GetName(), lo.Values(nameToPath)))
+			}
+			continue
+		}
+		if !hasPath {
 			if field.GetIsDynamic() {
 				continue
 			}
 			return nil, merr.WrapErrImportFailed(
 				fmt.Sprintf("no file for field: %s, files: %v", field.GetName(), lo.Values(nameToPath)))
 		}
-		reader, err := cm.Reader(ctx, nameToPath[field.GetName()])
+		reader, err := cm.Reader(ctx, path)
 		if err != nil {
 			return nil, merr.WrapErrImportFailed(
-				fmt.Sprintf("failed to read the file '%s', error: %s", nameToPath[field.GetName()], err.Error()))
+				fmt.Sprintf("failed to read the file '%s', error: %s", path, err.Error()))
 		}
 		readers[field.GetFieldID()] = reader
 	}
diff --git a/internal/util/importutilv2/numpy/reader_test.go b/internal/util/importutilv2/numpy/reader_test.go
index 80fae1722ea6b..45c272ccb4d32 100644
--- a/internal/util/importutilv2/numpy/reader_test.go
+++ b/internal/util/importutilv2/numpy/reader_test.go
@@ -111,6 +111,26 @@ func (suite *ReaderSuite) run(dt schemapb.DataType) {
 			},
 		},
 	}
+
+	if dt == schemapb.DataType_VarChar {
+		// Add a BM25 function if data type is VarChar
+		schema.Fields = append(schema.Fields, &schemapb.FieldSchema{
+			FieldID:          103,
+			Name:             "sparse",
+			DataType:         schemapb.DataType_SparseFloatVector,
+			IsFunctionOutput: true,
+		})
+		schema.Functions = append(schema.Functions, &schemapb.FunctionSchema{
+			Id:               1000,
+			Name:             "bm25",
+			Type:             schemapb.FunctionType_BM25,
+			InputFieldIds:    []int64{102},
+			InputFieldNames:  []string{dt.String()},
+			OutputFieldIds:   []int64{103},
+			OutputFieldNames: []string{"sparse"},
+		})
+	}
+
 	insertData, err := testutil.CreateInsertData(schema, suite.numRows)
 	suite.NoError(err)
 	fieldIDToField := lo.KeyBy(schema.GetFields(), func(field *schemapb.FieldSchema) int64 {
@@ -118,6 +138,9 @@
 	})
 	files := make(map[int64]string)
 	for _, field := range schema.GetFields() {
+		if field.GetIsFunctionOutput() {
+			continue
+		}
 		files[field.GetFieldID()] = fmt.Sprintf("%s.npy", field.GetName())
 	}
@@ -452,4 +475,19 @@ func TestCreateReaders(t *testing.T) {
 	}
 	_, err = CreateReaders(ctx, cm, schema, []string{"pk", "vec"})
 	assert.NoError(t, err)
+
+	// auto id and Function
+	schema = &schemapb.CollectionSchema{
+		Fields: []*schemapb.FieldSchema{
+			{Name: "pk", DataType: schemapb.DataType_Int64, IsPrimaryKey: true, AutoID: true},
+			{Name: "vec", DataType: schemapb.DataType_FloatVector},
+			{Name: "text", DataType: schemapb.DataType_VarChar},
+			{Name: "sparse", DataType: schemapb.DataType_SparseFloatVector, IsFunctionOutput: true},
+		},
+		Functions: []*schemapb.FunctionSchema{
+			{Name: "bm25", InputFieldNames: []string{"text"}, OutputFieldNames: []string{"sparse"}},
+		},
+	}
+	_, err = CreateReaders(ctx, cm, schema, []string{"vec", "text"})
+	assert.NoError(t, err)
 }
diff --git a/internal/util/importutilv2/parquet/reader_test.go b/internal/util/importutilv2/parquet/reader_test.go
index 8bb886a80831c..fac4fc0ac7ec3 100644
--- a/internal/util/importutilv2/parquet/reader_test.go
+++ b/internal/util/importutilv2/parquet/reader_test.go
@@ -146,6 +146,24 @@ func (s *ReaderSuite) run(dataType schemapb.DataType, elemType schemapb.DataType
 			},
 		},
 	}
+	if dataType == schemapb.DataType_VarChar {
+		// Add a BM25 function if data type is VarChar
+		schema.Fields = append(schema.Fields, &schemapb.FieldSchema{
+			FieldID:          103,
+			Name:             "sparse",
+			DataType:         schemapb.DataType_SparseFloatVector,
+			IsFunctionOutput: true,
+		})
+		schema.Functions = append(schema.Functions, &schemapb.FunctionSchema{
+			Id:               1000,
+			Name:             "bm25",
+			Type:             schemapb.FunctionType_BM25,
+			InputFieldIds:    []int64{102},
+			InputFieldNames:  []string{dataType.String()},
+			OutputFieldIds:   []int64{103},
+			OutputFieldNames: []string{"sparse"},
+		})
+	}
 
 	filePath := fmt.Sprintf("/tmp/test_%d_reader.parquet", rand.Int())
 	defer os.Remove(filePath)
diff --git a/internal/util/importutilv2/parquet/util.go b/internal/util/importutilv2/parquet/util.go
index 8b5d1b1987acc..fd9c2371583c9 100644
--- a/internal/util/importutilv2/parquet/util.go
+++ b/internal/util/importutilv2/parquet/util.go
@@ -82,7 +82,7 @@ func CreateFieldReaders(ctx context.Context, fileReader *pqarrow.FileReader, sch
 	}
 
 	for _, field := range nameToField {
-		if typeutil.IsAutoPKField(field) || field.GetIsDynamic() {
+		if typeutil.IsAutoPKField(field) || field.GetIsDynamic() || field.GetIsFunctionOutput() {
 			continue
 		}
 		if _, ok := crs[field.GetFieldID()]; !ok {
@@ -206,10 +206,12 @@ func convertToArrowDataType(field *schemapb.FieldSchema, isArray bool) (arrow.Da
 	}
 }
 
+// This method is used only by import util and related tests. Returned arrow.Schema
+// doesn't include function output fields.
 func ConvertToArrowSchema(schema *schemapb.CollectionSchema, useNullType bool) (*arrow.Schema, error) {
 	arrFields := make([]arrow.Field, 0)
 	for _, field := range schema.GetFields() {
-		if typeutil.IsAutoPKField(field) {
+		if typeutil.IsAutoPKField(field) || field.GetIsFunctionOutput() {
 			continue
 		}
 		arrDataType, err := convertToArrowDataType(field, false)
@@ -234,7 +236,7 @@ func isSchemaEqual(schema *schemapb.CollectionSchema, arrSchema *arrow.Schema) e
 		return field.Name
 	})
 	for _, field := range schema.GetFields() {
-		if typeutil.IsAutoPKField(field) {
+		if typeutil.IsAutoPKField(field) || field.GetIsFunctionOutput() {
 			continue
 		}
 		arrField, ok := arrNameToField[field.GetName()]
diff --git a/internal/util/testutil/test_util.go b/internal/util/testutil/test_util.go
index 3e89b33d155de..900d0784b6e95 100644
--- a/internal/util/testutil/test_util.go
+++ b/internal/util/testutil/test_util.go
@@ -109,7 +109,7 @@ func CreateInsertData(schema *schemapb.CollectionSchema, rows int, nullPercent .
 		return nil, err
 	}
 	for _, f := range schema.GetFields() {
-		if f.GetAutoID() {
+		if f.GetAutoID() || f.IsFunctionOutput {
 			continue
 		}
 		switch f.GetDataType() {
@@ -213,7 +213,7 @@ func BuildArrayData(schema *schemapb.CollectionSchema, insertData *storage.Inser
 	mem := memory.NewGoAllocator()
 	columns := make([]arrow.Array, 0, len(schema.Fields))
 	for _, field := range schema.Fields {
-		if field.GetIsPrimaryKey() && field.GetAutoID() {
+		if field.GetIsPrimaryKey() && field.GetAutoID() || field.GetIsFunctionOutput() {
 			continue
 		}
 		fieldID := field.GetFieldID()
@@ -531,7 +531,7 @@ func CreateInsertDataRowsForJSON(schema *schemapb.CollectionSchema, insertData *
 			field := fieldIDToField[fieldID]
 			dataType := field.GetDataType()
 			elemType := field.GetElementType()
-			if field.GetAutoID() {
+			if field.GetAutoID() || field.IsFunctionOutput {
 				continue
 			}
 			if v.GetRow(i) == nil {
@@ -590,11 +590,12 @@ func CreateInsertDataForCSV(schema *schemapb.CollectionSchema, insertData *stora
 	csvData := make([][]string, 0, rowNum+1)
 
 	header := make([]string, 0)
-	nameToFields := lo.KeyBy(schema.GetFields(), func(field *schemapb.FieldSchema) string {
+	fields := lo.Filter(schema.GetFields(), func(field *schemapb.FieldSchema, _ int) bool {
+		return !field.GetAutoID() && !field.IsFunctionOutput
+	})
+	nameToFields := lo.KeyBy(fields, func(field *schemapb.FieldSchema) string {
 		name := field.GetName()
-		if !field.GetAutoID() {
-			header = append(header, name)
-		}
+		header = append(header, name)
 		return name
 	})
 	csvData = append(csvData, header)
@@ -606,9 +607,6 @@ func CreateInsertDataForCSV(schema *schemapb.CollectionSchema, insertData *stora
 			value := insertData.Data[field.FieldID]
 			dataType := field.GetDataType()
 			elemType := field.GetElementType()
-			if field.GetAutoID() {
-				continue
-			}
 			// deal with null value
 			if field.GetNullable() && value.GetRow(i) == nil {
 				data = append(data, nullkey)
diff --git a/pkg/util/testutils/gen_data.go b/pkg/util/testutils/gen_data.go
index 15b39933858ff..bf2f91c7f6911 100644
--- a/pkg/util/testutils/gen_data.go
+++ b/pkg/util/testutils/gen_data.go
@@ -24,6 +24,7 @@ import (
 	"math/rand"
 	"sort"
 	"strconv"
+	"strings"
 
 	"github.com/x448/float16"
@@ -109,8 +110,21 @@ func GenerateVarCharArray(numRows int, maxLen int) []string {
 
 func GenerateStringArray(numRows int) []string {
 	ret := make([]string, 0, numRows)
+
+	genSentence := func() string {
+		words := []string{"hello", "world", "this", "is", "a", "test", "sentence", "milvus", "vector", "database", "search", "engine", "fast", "efficient", "scalable"}
+		selectedWords := make([]string, rand.Intn(6)+5) // 5 to 10 words
+		for i := range selectedWords {
+			selectedWords[i] = words[rand.Intn(len(words))]
+		}
+		rand.Shuffle(len(selectedWords), func(i, j int) {
+			selectedWords[i], selectedWords[j] = selectedWords[j], selectedWords[i]
+		})
+		return strings.Join(selectedWords, " ")
+	}
+
 	for i := 0; i < numRows; i++ {
-		ret = append(ret, strconv.Itoa(i))
+		ret = append(ret, genSentence())
	}
 	return ret
 }
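
Usage sketch (not part of the patch): the reader tests above all build the same
schema shape, so the end-to-end contract of this change can be summarized with
the snippet below. Field IDs and names are illustrative, mirroring the test
fixtures; this is a sketch, not code from the patch.

	schema := &schemapb.CollectionSchema{
		Fields: []*schemapb.FieldSchema{
			{FieldID: 100, Name: "pk", DataType: schemapb.DataType_Int64, IsPrimaryKey: true, AutoID: true},
			{FieldID: 102, Name: "text", DataType: schemapb.DataType_VarChar},
			// Function output field: must NOT be provided in the import files.
			{FieldID: 103, Name: "sparse", DataType: schemapb.DataType_SparseFloatVector, IsFunctionOutput: true},
		},
		Functions: []*schemapb.FunctionSchema{{
			Id:               1000,
			Name:             "bm25",
			Type:             schemapb.FunctionType_BM25,
			InputFieldIds:    []int64{102},
			InputFieldNames:  []string{"text"},
			OutputFieldIds:   []int64{103},
			OutputFieldNames: []string{"sparse"},
		}},
	}

The import files then carry only "text" ("pk" is auto-generated). On a regular
import the datanode fills "sparse" by calling RunEmbeddingFunction before
hashing, and collects per-field BM25 statistics before sync; on a backup import
(importutilv2.IsBackup) the function is not re-run and "sparse" is restored
from the backed-up binlogs.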