From bd97ce986892867b0517944a9c7cf476094a329c Mon Sep 17 00:00:00 2001 From: ThreadDao Date: Wed, 3 Jul 2024 14:18:01 +0800 Subject: [PATCH] fix: update err msg and max partition num (#778) Signed-off-by: ThreadDao --- .github/workflows/test-ci.yaml | 3 +- test/common/response_check.go | 2 +- test/common/utils.go | 8 +- test/testcases/index_test.go | 73 ++++++--- test/testcases/load_release_test.go | 22 ++- test/testcases/partition_key_test.go | 4 +- test/testcases/query_test.go | 126 +++++++++++---- test/testcases/search_test.go | 223 +++++++++++++++++++-------- test/testcases/upsert_test.go | 46 +++--- 9 files changed, 358 insertions(+), 149 deletions(-) diff --git a/.github/workflows/test-ci.yaml b/.github/workflows/test-ci.yaml index 9cc05f2b..cb841f9b 100644 --- a/.github/workflows/test-ci.yaml +++ b/.github/workflows/test-ci.yaml @@ -37,7 +37,8 @@ jobs: working-directory: ci/scripts run: | url="https://registry.hub.docker.com/v2/repositories/$IMAGE_REPO/tags?page=1&name=$TAG_PREFIX" - echo "IMAGE_TAG=$(curl -s $url | jq -r '."results"[]["name"] | select(test("amd64$"))' | head -n 1)" >> $GITHUB_ENV + echo "IMAGE_TAG=$(curl -s $url | jq -r '."results"[]["name"]' | head -n 1)" >> $GITHUB_ENV +# echo "IMAGE_TAG=$(curl -s $url | jq -r '."results"[]["name"] | select(test("amd64$"))' | head -n 1)" >> $GITHUB_ENV # echo "IMAGE_TAG=$(./docker_image_find_tag.sh -n ${IMAGE_REPO} -t ${TAG_PREFIX}latest -f ${TAG_PREFIX} -F -L -q)" >> $GITHUB_ENV # export IMAGE_TAG=$IMAGE_TAG # export IMAGE_REPO=$IMAGE_REPO diff --git a/test/common/response_check.go b/test/common/response_check.go index aa118e99..3d58b3d7 100644 --- a/test/common/response_check.go +++ b/test/common/response_check.go @@ -32,7 +32,7 @@ func CheckErr(t *testing.T, actualErr error, expErrNil bool, expErrorMsg ...stri } } if !contains { - t.FailNow() + t.Fatalf("CheckErr failed, actualErr doesn't contains any expErrorMsg, please check test cases!") } } } diff --git a/test/common/utils.go b/test/common/utils.go index 6df077ae..80e6bfb1 100644 --- a/test/common/utils.go +++ b/test/common/utils.go @@ -60,7 +60,7 @@ const ( // const default value from milvus const ( - MaxPartitionNum = 4096 + MaxPartitionNum = 1024 DefaultDynamicFieldName = "$meta" QueryCountFieldName = "count(*)" DefaultPartition = "_default" @@ -1257,7 +1257,7 @@ func GenDynamicFieldData(start int, nb int) []entity.Column { return data } -func MergeColumnsToDynamic(nb int, columns []entity.Column) *entity.ColumnJSONBytes { +func MergeColumnsToDynamic(nb int, columns []entity.Column, columnName string) *entity.ColumnJSONBytes { values := make([][]byte, 0, nb) for i := 0; i < nb; i++ { m := make(map[string]interface{}) @@ -1271,7 +1271,7 @@ func MergeColumnsToDynamic(nb int, columns []entity.Column) *entity.ColumnJSONBy } values = append(values, bs) } - jsonColumn := entity.NewColumnJSONBytes(DefaultDynamicFieldName, values) + jsonColumn := entity.NewColumnJSONBytes(columnName, values) var jsonData []string for i := 0; i < jsonColumn.Len(); i++ { @@ -1320,7 +1320,6 @@ func GenAllFloatIndex(metricTypes ...entity.MetricType) []entity.Index { nlist := 128 var allFloatIndex []entity.Index var allMetricTypes []entity.MetricType - log.Println(metricTypes) if len(metricTypes) == 0 { allMetricTypes = SupportFloatMetricType } else { @@ -1385,6 +1384,7 @@ type InvalidExprStruct struct { } var InvalidExpressions = []InvalidExprStruct{ + // https://github.com/milvus-io/milvus-sdk-go/issues/777 {Expr: "id in [0]", ErrNil: true, ErrMsg: "fieldName(id) not found"}, // not exist field 
but no error {Expr: "int64 in not [0]", ErrNil: false, ErrMsg: "cannot parse expression"}, // wrong term expr keyword {Expr: "int64 > 10 AND int64 < 100", ErrNil: false, ErrMsg: "cannot parse expression"}, // AND isn't supported diff --git a/test/testcases/index_test.go b/test/testcases/index_test.go index 90f7fc13..fb98229e 100644 --- a/test/testcases/index_test.go +++ b/test/testcases/index_test.go @@ -79,6 +79,7 @@ func TestCreateIndexMultiVectors(t *testing.T) { common.CheckErr(t, err, true) for _, idx := range common.GenAllFloatIndex() { for _, fieldName := range []string{common.DefaultFloat16VecFieldName, common.DefaultBFloat16VecFieldName} { + log.Printf("index name=%s, index type=%v, index params=%v", idx.Name(), idx.IndexType(), idx.Params()) err := mc.CreateIndex(ctx, collName, fieldName, idx, false, client.WithIndexName(fieldName)) common.CheckErr(t, err, true) @@ -360,8 +361,7 @@ func TestCreateScalarIndexVectorField(t *testing.T) { idx := entity.NewScalarIndexWithType(ip) for _, fieldName := range common.AllVectorsFieldsName { err := mc.CreateIndex(ctx, collName, fieldName, idx, false) - common.CheckErr(t, err, false, "STL_SORT are only supported on numeric field", - "TRIE are only supported on varchar field", "INVERTED are not supported on") + common.CheckErr(t, err, false, "metric type not set for vector index") } } for _, fieldName := range common.AllFloatVectorsFieldNames { @@ -547,7 +547,7 @@ func TestCreateBinaryIndexNotSupportedMetricsType(t *testing.T) { // create BinFlat idxBinFlat, _ := entity.NewIndexBinFlat(metricType, 128) err := mc.CreateIndex(ctx, collName, common.DefaultBinaryVecFieldName, idxBinFlat, false, client.WithIndexName("my_index")) - common.CheckErr(t, err, false, "supported: [HAMMING JACCARD SUBSTRUCTURE SUPERSTRUCTURE]") + common.CheckErr(t, err, false, fmt.Sprintf("binary vector index does not support metric type: %v", metricType)) } invalidMetricTypes2 := []entity.MetricType{ @@ -563,7 +563,8 @@ func TestCreateBinaryIndexNotSupportedMetricsType(t *testing.T) { // create BinIvfFlat index idxBinIvfFlat, _ := entity.NewIndexBinIvfFlat(metricType, 128) errIvf := mc.CreateIndex(ctx, collName, common.DefaultBinaryVecFieldName, idxBinIvfFlat, false, client.WithIndexName("my_index2")) - common.CheckErr(t, errIvf, false, fmt.Sprintf("metric type %v not found or not supported", metricType)) + common.CheckErr(t, errIvf, false, fmt.Sprintf("metric type %s not found or not supported, supported: [HAMMING JACCARD]", metricType), + "binary vector index does not support metric type") } } @@ -607,15 +608,18 @@ func TestCreateIndexWithoutIndexTypeParams(t *testing.T) { common.CheckErr(t, err, true) for _, fieldName := range common.AllVectorsFieldsName { - idx, _ := entity.NewIndexAUTOINDEX(entity.COSINE) if fieldName == common.DefaultBinaryVecFieldName { + idx, _ := entity.NewIndexAUTOINDEX(entity.JACCARD) err = mc.CreateIndex(ctx, collName, fieldName, idx, false) - common.CheckErr(t, err, false, "HNSW only support float vector data type") - // create binary index - idxBinary, _ := entity.NewIndexBinIvfFlat(entity.JACCARD, 64) - err = mc.CreateIndex(ctx, collName, fieldName, idxBinary, false) common.CheckErr(t, err, true) + + // describe and check index + indexes, _ := mc.DescribeIndex(ctx, collName, fieldName) + expIndex := entity.NewGenericIndex(fieldName, entity.AUTOINDEX, map[string]string{"metric_type": string(entity.JACCARD)}) + common.CheckIndexResult(t, indexes, expIndex) + } else { + idx, _ := entity.NewIndexAUTOINDEX(entity.COSINE) // create index err = 
mc.CreateIndex(ctx, collName, fieldName, idx, false) common.CheckErr(t, err, true) @@ -843,25 +847,54 @@ func TestCreateSparseUnsupportedIndex(t *testing.T) { mc.Flush(ctx, collName, false) // create unsupported vector index on sparse field - autoIdx, _ := entity.NewIndexAUTOINDEX(entity.IP) - vectorIndex := append(common.GenAllFloatIndex(entity.IP), autoIdx) + vectorIndex := append(common.GenAllFloatIndex(entity.IP)) for _, idx := range vectorIndex { err := mc.CreateIndex(ctx, collName, common.DefaultSparseVecFieldName, idx, false) - common.CheckErr(t, err, false, "data type should be FloatVector, Float16Vector or BFloat16Vector", - "HNSW only support float vector data type") + common.CheckErr(t, err, false, "data type 104 can't build with this index") } // create scalar index on sparse vector for _, idx := range []entity.Index{ - entity.NewScalarIndex(), entity.NewScalarIndexWithType(entity.Trie), entity.NewScalarIndexWithType(entity.Sorted), entity.NewScalarIndexWithType(entity.Inverted), } { err := mc.CreateIndex(ctx, collName, common.DefaultSparseVecFieldName, idx, false) - common.CheckErr(t, err, false, "TRIE are only supported on varchar field", - "STL_SORT are only supported on numeric field", "HNSW only support float vector data type", - "INVERTED are not supported on SparseFloatVector field") + common.CheckErr(t, err, false, "metric type not set for vector index") + } +} + +// create sparse auto / scalar index +func TestCreateSparseAutoIndex(t *testing.T) { + ctx := createContext(t, time.Second*common.DefaultTimeout) + //connect + mc := createMilvusClient(ctx, t) + + // create collection with all datatype + cp := CollectionParams{CollectionFieldsType: Int64VarcharSparseVec, AutoID: false, EnableDynamicField: true, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim, MaxLength: 300} + collName := createCollection(ctx, t, mc, cp) + + // insert + dp := DataParams{CollectionName: collName, PartitionName: "", CollectionFieldsType: Int64VarcharSparseVec, + start: 0, nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: true, WithRows: false} + _, _ = insertData(ctx, t, mc, dp, common.WithSparseVectorLen(100)) + mc.Flush(ctx, collName, false) + + // create scalar index on sparse vector + autoIdx, _ := entity.NewIndexAUTOINDEX(entity.IP) + for _, idx := range []entity.Index{ + entity.NewScalarIndex(), + autoIdx, + } { + err := mc.CreateIndex(ctx, collName, common.DefaultSparseVecFieldName, idx, false) + common.CheckErr(t, err, true) + idxes, err := mc.DescribeIndex(ctx, collName, common.DefaultSparseVecFieldName) + common.CheckErr(t, err, true) + expIndex := entity.NewGenericIndex(common.DefaultSparseVecFieldName, autoIdx.IndexType(), map[string]string{"index_type": "AUTOINDEX", "metric_type": "IP"}) + common.CheckIndexResult(t, idxes, expIndex) + err = mc.DropIndex(ctx, collName, common.DefaultSparseVecFieldName) + common.CheckErr(t, err, true) } } @@ -924,7 +957,7 @@ func TestCreateIndexNotSupportedField(t *testing.T) { // create index idx, _ := entity.NewIndexHNSW(entity.L2, 8, 96) err := mc.CreateIndex(ctx, collName, common.DefaultFloatFieldName, idx, false) - common.CheckErr(t, err, false, "HNSW only support float vector data type") + common.CheckErr(t, err, false, "can't build hnsw in not vector type") // create scann index indexScann, _ := entity.NewIndexSCANN(entity.L2, 8, true) @@ -989,14 +1022,14 @@ func TestCreateIndexInvalidParams(t *testing.T) { common.CheckErr(t, errScann2, true) err := mc.CreateIndex(ctx, collName, common.DefaultFloatVecFieldName, 
idxScann, false) common.CheckErr(t, err, false, - fmt.Sprintf("metric type %s not found or not supported, supported: [L2 IP COSINE]", mt)) + fmt.Sprintf("float vector index does not support metric type: %s", mt)) } // invalid flat metric type jaccard for flat index idx, _ := entity.NewIndexFlat(entity.JACCARD) errMetricType := mc.CreateIndex(ctx, collName, common.DefaultFloatVecFieldName, idx, false) common.CheckErr(t, errMetricType, false, - "metric type JACCARD not found or not supported, supported: [L2 IP COSINE]") + "float vector index does not support metric type: JACCARD") } // test create index with nil index diff --git a/test/testcases/load_release_test.go b/test/testcases/load_release_test.go index 80762c02..2076b104 100644 --- a/test/testcases/load_release_test.go +++ b/test/testcases/load_release_test.go @@ -247,16 +247,22 @@ func TestLoadMultiPartitions(t *testing.T) { idx, _ := entity.NewIndexHNSW(entity.L2, 8, 96) mc.CreateIndex(ctx, collName, common.DefaultFloatVecFieldName, idx, false) - // load partition - errLoad := mc.LoadPartitions(ctx, collName, []string{partitionName, common.DefaultPartition}, false) + // load default partition + errLoad := mc.LoadPartitions(ctx, collName, []string{common.DefaultPartition}, false) common.CheckErr(t, errLoad, true) - //query nb from partition - queryIds := entity.NewColumnInt64(common.DefaultIntFieldName, []int64{0, common.DefaultNb}) - queryResultPartition, _ := mc.QueryByPks(ctx, collName, []string{}, queryIds, []string{common.DefaultIntFieldName}) - common.CheckQueryResult(t, queryResultPartition, []entity.Column{ - entity.NewColumnInt64(common.DefaultIntFieldName, []int64{0, common.DefaultNb}), - }) + //query nb from default partition + resDef, _ := mc.Query(ctx, collName, []string{common.DefaultPartition}, "", []string{common.QueryCountFieldName}) + require.EqualValues(t, common.DefaultNb, resDef.GetColumn(common.QueryCountFieldName).(*entity.ColumnInt64).Data()[0]) + + // load partition and query -> actually not loaded + errLoad = mc.LoadPartitions(ctx, collName, []string{partitionName}, false) + common.CheckErr(t, errLoad, true) + resPar, _ := mc.Query(ctx, collName, []string{partitionName}, "", []string{common.QueryCountFieldName}) + require.EqualValues(t, common.DefaultNb, resPar.GetColumn(common.QueryCountFieldName).(*entity.ColumnInt64).Data()[0]) + + res, _ := mc.Query(ctx, collName, []string{}, "", []string{common.QueryCountFieldName}) + require.EqualValues(t, common.DefaultNb*2, res.GetColumn(common.QueryCountFieldName).(*entity.ColumnInt64).Data()[0]) } // test load partitions repeatedly diff --git a/test/testcases/partition_key_test.go b/test/testcases/partition_key_test.go index 1b7ba562..e06804df 100644 --- a/test/testcases/partition_key_test.go +++ b/test/testcases/partition_key_test.go @@ -189,7 +189,7 @@ func TestPartitionKeyInvalidNumPartition(t *testing.T) { numPartitions int64 errMsg string }{ - {common.MaxPartitionNum + 1, "exceeds max configuration (4096)"}, + {common.MaxPartitionNum + 1, fmt.Sprintf("exceeds max configuration (%d)", common.MaxPartitionNum)}, {-1, "the specified partitions should be greater than 0 if partition key is used"}, } for _, npStruct := range invalidNumPartitionStruct { @@ -215,7 +215,7 @@ func TestPartitionKeyNumPartition(t *testing.T) { 1, 128, 64, - 4096, + common.MaxPartitionNum, } for _, numPartitionsValue := range numPartitionsValues { ctx := createContext(t, time.Second*common.DefaultTimeout) diff --git a/test/testcases/query_test.go b/test/testcases/query_test.go index 
55bd6ec1..7dd9cd89 100644 --- a/test/testcases/query_test.go +++ b/test/testcases/query_test.go @@ -312,6 +312,99 @@ func TestQueryEmptyOutputFields(t *testing.T) { } } +// test query with an not existed field +func TestQueryOutputNotExistField(t *testing.T) { + ctx := createContext(t, time.Second*common.DefaultTimeout) + // connect + mc := createMilvusClient(ctx, t) + + // create, insert, index + collName, ids := createCollectionWithDataIndex(ctx, t, mc, true, true) + + // Load collection + errLoad := mc.LoadCollection(ctx, collName, false) + common.CheckErr(t, errLoad, true) + + //query + _, errQuery := mc.QueryByPks( + ctx, + collName, + []string{common.DefaultPartition}, + ids.Slice(0, 10), + []string{common.DefaultIntFieldName, "varchar"}, + ) + common.CheckErr(t, errQuery, false, "field varchar not exist") +} + +// test query empty output fields: []string{} -> default pk +// test query empty output fields: []string{""} -> error +// test query with not existed field ["aa"]: error or as dynamic field +// test query with part not existed field ["aa", "$meat"]: error or as dynamic field +// test query with repeated field: ["*", "$meat"], ["floatVec", floatVec"] unique field +func TestQueryEmptyOutputFields2(t *testing.T) { + ctx := createContext(t, time.Second*common.DefaultTimeout) + // connect + mc := createMilvusClient(ctx, t) + + for _, enableDynamic := range []bool{true, false} { + // create collection + cp := CollectionParams{CollectionFieldsType: Int64FloatVec, AutoID: false, EnableDynamicField: enableDynamic, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim} + collName := createCollection(ctx, t, mc, cp) + + // insert + dp := DataParams{CollectionName: collName, PartitionName: "", CollectionFieldsType: Int64FloatVec, + start: 0, nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: enableDynamic} + _, _ = insertData(ctx, t, mc, dp) + + idx, _ := entity.NewIndexHNSW(entity.L2, 8, 96) + _ = mc.CreateIndex(ctx, collName, common.DefaultFloatVecFieldName, idx, false) + + // Load collection + errLoad := mc.LoadCollection(ctx, collName, false) + common.CheckErr(t, errLoad, true) + + //query with empty output fields []string{}-> output "int64" + expr := fmt.Sprintf("%s < 10", common.DefaultIntFieldName) + queryNilOutputs, err := mc.Query(ctx, collName, []string{}, expr, []string{}, client.WithSearchQueryConsistencyLevel(entity.ClStrong)) + common.CheckErr(t, err, true) + common.CheckOutputFields(t, queryNilOutputs, []string{common.DefaultIntFieldName}) + + //query with not existed field -> output field as dynamic or error + fakeName := "aaa" + res2, err2 := mc.Query(ctx, collName, []string{}, expr, []string{fakeName}, client.WithSearchQueryConsistencyLevel(entity.ClStrong)) + if enableDynamic { + common.CheckErr(t, err2, true) + common.CheckOutputFields(t, res2, []string{common.DefaultIntFieldName, fakeName}) + } else { + common.CheckErr(t, err2, false, fmt.Sprintf("%s not exist", fakeName)) + } + + // query with part not existed field ["aa", "$meat"]: error or as dynamic field + res3, err3 := mc.Query(ctx, collName, []string{}, expr, []string{fakeName, common.DefaultDynamicFieldName}, client.WithSearchQueryConsistencyLevel(entity.ClStrong)) + if enableDynamic { + common.CheckErr(t, err3, true) + common.CheckOutputFields(t, res3, []string{common.DefaultIntFieldName, fakeName, common.DefaultDynamicFieldName}) + } else { + common.CheckErr(t, err3, false, "not exist") + } + + // query with repeated field: ["*", "$meat"], ["floatVec", floatVec"] unique field + res4, err4 
:= mc.Query(ctx, collName, []string{}, expr, []string{"*", common.DefaultDynamicFieldName}, client.WithSearchQueryConsistencyLevel(entity.ClStrong)) + if enableDynamic { + common.CheckErr(t, err4, true) + common.CheckOutputFields(t, res4, []string{common.DefaultIntFieldName, common.DefaultFloatVecFieldName, common.DefaultFloatFieldName, common.DefaultDynamicFieldName}) + } else { + common.CheckErr(t, err4, false, "$meta not exist") + } + + res5, err5 := mc.Query(ctx, collName, []string{}, expr, []string{common.DefaultFloatVecFieldName, common.DefaultFloatVecFieldName, common.DefaultIntFieldName}, client.WithSearchQueryConsistencyLevel(entity.ClStrong)) + + common.CheckErr(t, err5, true) + common.CheckOutputFields(t, res5, []string{common.DefaultIntFieldName, common.DefaultFloatVecFieldName}) + } +} + // test query output int64 and float and floatVector fields func TestQueryOutputFields(t *testing.T) { ctx := createContext(t, time.Second*common.DefaultTimeout) @@ -484,36 +577,12 @@ func TestOutputAllFieldsColumn(t *testing.T) { expColumns = append(expColumns, column.Slice(0, pos)) } if isDynamic { - expColumns = append(expColumns, common.MergeColumnsToDynamic(pos, common.GenDynamicFieldData(0, pos))) + expColumns = append(expColumns, common.MergeColumnsToDynamic(pos, common.GenDynamicFieldData(0, pos), common.DefaultDynamicFieldName)) } common.CheckQueryResult(t, queryResultAll, expColumns) } } -// test query with an not existed field -func TestQueryOutputNotExistField(t *testing.T) { - ctx := createContext(t, time.Second*common.DefaultTimeout) - // connect - mc := createMilvusClient(ctx, t) - - // create, insert, index - collName, ids := createCollectionWithDataIndex(ctx, t, mc, true, true) - - // Load collection - errLoad := mc.LoadCollection(ctx, collName, false) - common.CheckErr(t, errLoad, true) - - //query - _, errQuery := mc.QueryByPks( - ctx, - collName, - []string{common.DefaultPartition}, - ids.Slice(0, 10), - []string{common.DefaultIntFieldName, "varchar"}, - ) - common.CheckErr(t, errQuery, false, "field varchar not exist") -} - // Test query json collection, filter json field, output json field func TestQueryJsonDynamicField(t *testing.T) { t.Parallel() @@ -561,7 +630,7 @@ func TestQueryJsonDynamicField(t *testing.T) { jsonColumn := entity.NewColumnJSONBytes(common.DefaultJSONFieldName, jsonValues) actualColumns := []entity.Column{pkColumn, jsonColumn} if dynamicField { - dynamicColumn := common.MergeColumnsToDynamic(2, common.GenDynamicFieldData(0, 2)) + dynamicColumn := common.MergeColumnsToDynamic(2, common.GenDynamicFieldData(0, 2), common.DefaultDynamicFieldName) actualColumns = append(actualColumns, dynamicColumn) } @@ -594,7 +663,7 @@ func TestQueryJsonDynamicField(t *testing.T) { queryResult, err = mc.QueryByPks( ctx, collName, []string{common.DefaultPartition}, - common.MergeColumnsToDynamic(2, common.GenDynamicFieldData(0, 2)), + common.MergeColumnsToDynamic(2, common.GenDynamicFieldData(0, 2), common.DefaultDynamicFieldName), []string{common.DefaultIntFieldName, common.DefaultJSONFieldName}, ) common.CheckErr(t, err, false, "only int64 and varchar column can be primary key for now") @@ -891,7 +960,6 @@ func TestQueryJsonDynamicExpr(t *testing.T) { line, _ := dynamicNumColumn.GetAsInt64(i) numberData = append(numberData, line) } - require.Equal(t, numberData, []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) } // test query and output both json and dynamic field @@ -937,7 +1005,7 @@ func TestQueryJsonDynamicFieldRows(t *testing.T) { j1, _ := json.Marshal(&m1) jsonValues := 
[][]byte{j0, j1} jsonColumn := entity.NewColumnJSONBytes(common.DefaultJSONFieldName, jsonValues) - dynamicColumn := common.MergeColumnsToDynamic(10, common.GenDynamicFieldData(0, 10)) + dynamicColumn := common.MergeColumnsToDynamic(10, common.GenDynamicFieldData(0, 10), common.DefaultDynamicFieldName) // gen dynamic json column for _, column := range queryResult { diff --git a/test/testcases/search_test.go b/test/testcases/search_test.go index 7cca2adb..6074948e 100644 --- a/test/testcases/search_test.go +++ b/test/testcases/search_test.go @@ -423,6 +423,24 @@ func TestSearchEmptyOutputFields(t *testing.T) { common.CheckErr(t, errSearchExist, false, "not exist") } common.CheckSearchResult(t, searchResPkOutput, common.DefaultNq, common.DefaultTopK) + + res, errSearchExist = mc.Search( + ctx, collName, + []string{}, + "", + []string{""}, + common.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector), + common.DefaultFloatVecFieldName, + entity.L2, + common.DefaultTopK, + sp, + ) + + if enableDynamic { + common.CheckErr(t, errSearchExist, false, "parse output field name failed") + } else { + common.CheckErr(t, errSearchExist, false, "not exist") + } } } @@ -441,31 +459,38 @@ func TestSearchNotExistOutputFields(t *testing.T) { errLoad := mc.LoadCollection(ctx, collName, false) common.CheckErr(t, errLoad, true) - type notExistOutputFields []string + type dynamicOutputFields struct { + outputFields []string + expOutputFields []string + } + dof := []dynamicOutputFields{ + {outputFields: []string{"aaa"}, expOutputFields: []string{"aaa"}}, + {outputFields: []string{"aaa", common.DefaultDynamicFieldName}, expOutputFields: []string{"aaa", common.DefaultDynamicFieldName}}, + {outputFields: []string{"*", common.DefaultDynamicFieldName}, expOutputFields: []string{common.DefaultIntFieldName, common.DefaultFloatVecFieldName, common.DefaultFloatFieldName, common.DefaultDynamicFieldName}}, + } - // search vector output fields not exist, part exist - outputFields := []notExistOutputFields{[]string{"aaa"}, []string{"fields", common.DefaultFloatFieldName}, - []string{"fields", "*"}} sp, _ := entity.NewIndexHNSWSearchParam(74) - for _, fields := range outputFields { - log.Println(fields) - _, errSearch := mc.Search( - ctx, collName, - []string{}, - "", - fields, - common.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector), - common.DefaultFloatVecFieldName, - entity.L2, - common.DefaultTopK, - sp, + + for _, _dof := range dof { + resSearch, err := mc.Search(ctx, collName, []string{}, "", _dof.outputFields, common.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector), + common.DefaultFloatVecFieldName, entity.L2, common.DefaultTopK, sp, ) if enableDynamic { - common.CheckErr(t, errSearch, true) + common.CheckErr(t, err, true) + common.CheckSearchResult(t, resSearch, common.DefaultNq, common.DefaultTopK) + common.CheckOutputFields(t, resSearch[0].Fields, _dof.expOutputFields) } else { - common.CheckErr(t, errSearch, false, "not exist") + common.CheckErr(t, err, false, "not exist") } } + + existedRepeatedFields := []string{common.DefaultIntFieldName, common.DefaultFloatVecFieldName, common.DefaultIntFieldName, common.DefaultFloatVecFieldName} + resSearch2, err2 := mc.Search(ctx, collName, []string{}, "", existedRepeatedFields, common.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector), + common.DefaultFloatVecFieldName, entity.L2, common.DefaultTopK, sp, + ) + common.CheckErr(t, err2, true) + 
common.CheckSearchResult(t, resSearch2, common.DefaultNq, common.DefaultTopK) + common.CheckOutputFields(t, resSearch2[0].Fields, []string{common.DefaultIntFieldName, common.DefaultFloatVecFieldName}) } } @@ -651,28 +676,47 @@ func TestSearchInvalidVectors(t *testing.T) { // connect mc := createMilvusClient(ctx, t) - // create collection with data - collName, _ := createCollectionWithDataIndex(ctx, t, mc, false, true) + // create -> insert [0, 3000) -> flush -> index -> load + cp := CollectionParams{CollectionFieldsType: AllVectors, AutoID: false, EnableDynamicField: true, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim} - // load collection - errLoad := mc.LoadCollection(ctx, collName, false) - common.CheckErr(t, errLoad, true) + dp := DataParams{DoInsert: true, CollectionFieldsType: AllVectors, start: 0, nb: common.DefaultNb * 2, + dim: common.DefaultDim, EnableDynamicField: true} + + // index params + ips := make([]IndexParams, 4) + var idx entity.Index + for _, fieldName := range common.AllVectorsFieldsName { + if fieldName == common.DefaultBinaryVecFieldName { + idx, _ = entity.NewIndexBinFlat(entity.JACCARD, 64) + } else { + idx, _ = entity.NewIndexFlat(entity.L2) + } + ips = append(ips, IndexParams{BuildIndex: true, Index: idx, FieldName: fieldName, async: false}) + } + + collName := prepareCollection(ctx, t, mc, cp, WithDataParams(dp), WithIndexParams(ips), WithCreateOption(client.WithConsistencyLevel(entity.ClStrong))) type invalidVectorsStruct struct { - vectors []entity.Vector - errMsg string + fieldName string + vectors []entity.Vector + errMsg string } invalidVectors := []invalidVectorsStruct{ // dim not match - {vectors: common.GenSearchVectors(common.DefaultNq, 64, entity.FieldTypeFloatVector), errMsg: "vector dimension mismatch"}, + {fieldName: common.DefaultFloatVecFieldName, vectors: common.GenSearchVectors(common.DefaultNq, 64, entity.FieldTypeFloatVector), errMsg: "vector dimension mismatch"}, + {fieldName: common.DefaultFloat16VecFieldName, vectors: common.GenSearchVectors(common.DefaultNq, 64, entity.FieldTypeFloat16Vector), errMsg: "vector dimension mismatch"}, //vector type not match - {vectors: common.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeBinaryVector), errMsg: "vector type must be the same"}, + {fieldName: common.DefaultFloatVecFieldName, vectors: common.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeBinaryVector), errMsg: "vector type must be the same"}, + {fieldName: common.DefaultBFloat16VecFieldName, vectors: common.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloat16Vector), errMsg: "vector type must be the same"}, // empty vectors - {vectors: []entity.Vector{}, errMsg: "nq [0] is invalid"}, - {vectors: []entity.Vector{entity.FloatVector{}}, errMsg: "vector dimension mismatch"}, + {fieldName: common.DefaultBinaryVecFieldName, vectors: []entity.Vector{}, errMsg: "nq [0] is invalid"}, + {fieldName: common.DefaultFloatVecFieldName, vectors: []entity.Vector{entity.FloatVector{}}, errMsg: "vector dimension mismatch"}, + {vectors: common.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector), errMsg: "multiple anns_fields exist, please specify a anns_field in search_params"}, + {fieldName: "", vectors: common.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector), errMsg: "multiple anns_fields exist, please specify a anns_field in search_params"}, } sp, _ := entity.NewIndexHNSWSearchParam(74) @@ -684,7 +728,7 @@ func 
TestSearchInvalidVectors(t *testing.T) { "", []string{"*"}, invalidVector.vectors, - common.DefaultFloatVecFieldName, + invalidVector.fieldName, entity.L2, common.DefaultTopK, sp, @@ -1007,6 +1051,55 @@ func TestSearchInvalidExpr(t *testing.T) { } } +// test search with field not existed expr: if dynamic +func TestSearchNotExistedExpr(t *testing.T) { + t.Parallel() + + ctx := createContext(t, time.Second*common.DefaultTimeout) + // connect + mc := createMilvusClient(ctx, t) + + for _, isDynamic := range [2]bool{true, false} { + // create collection + cp := CollectionParams{CollectionFieldsType: Int64FloatVecJSON, AutoID: false, EnableDynamicField: isDynamic, + ShardsNum: common.DefaultShards, Dim: common.DefaultDim} + collName := createCollection(ctx, t, mc, cp) + + // insert + dp := DataParams{CollectionName: collName, PartitionName: "", CollectionFieldsType: Int64FloatVecJSON, + start: 0, nb: common.DefaultNb, dim: common.DefaultDim, EnableDynamicField: isDynamic} + _, _ = insertData(ctx, t, mc, dp) + + idx, _ := entity.NewIndexHNSW(entity.L2, 8, 96) + _ = mc.CreateIndex(ctx, collName, common.DefaultFloatVecFieldName, idx, false) + + // Load collection + errLoad := mc.LoadCollection(ctx, collName, false) + common.CheckErr(t, errLoad, true) + + // search with invalid expr + sp, _ := entity.NewIndexHNSWSearchParam(74) + expr := "id < 10" + res, err := mc.Search( + ctx, collName, + []string{}, + expr, + []string{common.DefaultIntFieldName}, + common.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector), + common.DefaultFloatVecFieldName, + entity.L2, + common.DefaultTopK, + sp, + ) + if isDynamic { + common.CheckErr(t, err, true) + common.CheckSearchResult(t, res, common.DefaultNq, 0) + } else { + common.CheckErr(t, err, false, "not exist") + } + } +} + func TestSearchJsonFieldExpr(t *testing.T) { t.Parallel() @@ -1014,7 +1107,26 @@ func TestSearchJsonFieldExpr(t *testing.T) { // connect mc := createMilvusClient(ctx, t) - for _, dynamicField := range []bool{false} { + exprs := []string{ + "", + fmt.Sprintf("exists %s['number'] ", common.DefaultJSONFieldName), // exists + "json[\"number\"] > 1 and json[\"number\"] < 1000", // > and + fmt.Sprintf("%s[\"number\"] > 10", common.DefaultJSONFieldName), // number > + fmt.Sprintf("%s != 10 ", common.DefaultJSONFieldName), // json != 10 + fmt.Sprintf("%s[\"number\"] < 2000", common.DefaultJSONFieldName), // number < + fmt.Sprintf("%s[\"bool\"] != true", common.DefaultJSONFieldName), // bool != + fmt.Sprintf("%s[\"bool\"] == False", common.DefaultJSONFieldName), // bool == + fmt.Sprintf("%s[\"bool\"] in [true]", common.DefaultJSONFieldName), // bool in + fmt.Sprintf("%s[\"string\"] >= '1' ", common.DefaultJSONFieldName), // string >= + fmt.Sprintf("%s['list'][0] > 200", common.DefaultJSONFieldName), // list filter + fmt.Sprintf("%s['list'] != [2, 3]", common.DefaultJSONFieldName), // json[list] != + fmt.Sprintf("%s > 2000", common.DefaultJSONFieldName), // json > 2000 + fmt.Sprintf("%s like '2%%' ", common.DefaultJSONFieldName), // json like '2%' + fmt.Sprintf("%s[0] > 2000 ", common.DefaultJSONFieldName), // json[0] > 2000 + fmt.Sprintf("%s > 2000.5 ", common.DefaultJSONFieldName), // json > 2000.5 + } + + for _, dynamicField := range []bool{false, true} { // create collection cp := CollectionParams{CollectionFieldsType: Int64FloatVecJSON, AutoID: false, EnableDynamicField: dynamicField, ShardsNum: common.DefaultShards, Dim: common.DefaultDim} @@ -1033,25 +1145,6 @@ func TestSearchJsonFieldExpr(t *testing.T) { errLoad := 
mc.LoadCollection(ctx, collName, false) common.CheckErr(t, errLoad, true) - exprs := []string{ - "", - fmt.Sprintf("exists %s['number'] ", common.DefaultJSONFieldName), // exists - "json[\"number\"] > 1 and json[\"number\"] < 1000", // > and - fmt.Sprintf("%s[\"number\"] > 10", common.DefaultJSONFieldName), // number > - fmt.Sprintf("%s != 10 ", common.DefaultJSONFieldName), // json != 10 - fmt.Sprintf("%s[\"number\"] < 2000", common.DefaultJSONFieldName), // number < - fmt.Sprintf("%s[\"bool\"] != true", common.DefaultJSONFieldName), // bool != - fmt.Sprintf("%s[\"bool\"] == False", common.DefaultJSONFieldName), // bool == - fmt.Sprintf("%s[\"bool\"] in [true]", common.DefaultJSONFieldName), // bool in - fmt.Sprintf("%s[\"string\"] >= '1' ", common.DefaultJSONFieldName), // string >= - fmt.Sprintf("%s['list'][0] > 200", common.DefaultJSONFieldName), // list filter - fmt.Sprintf("%s['list'] != [2, 3]", common.DefaultJSONFieldName), // json[list] != - fmt.Sprintf("%s > 2000", common.DefaultJSONFieldName), // json > 2000 - fmt.Sprintf("%s like '2%%' ", common.DefaultJSONFieldName), // json like '2%' - fmt.Sprintf("%s[0] > 2000 ", common.DefaultJSONFieldName), // json[0] > 2000 - fmt.Sprintf("%s > 2000.5 ", common.DefaultJSONFieldName), // json > 2000.5 - } - // search with jsonField expr key datatype and json data type mismatch sp, _ := entity.NewIndexHNSWSearchParam(74) for _, expr := range exprs { @@ -1082,6 +1175,17 @@ func TestSearchDynamicFieldExpr(t *testing.T) { // connect mc := createMilvusClient(ctx, t) + exprs := []string{ + "", + "exists dynamicNumber", // exist without dynamic fieldName + fmt.Sprintf("exists %s[\"dynamicNumber\"]", common.DefaultDynamicFieldName), // exist with fieldName + fmt.Sprintf("%s[\"dynamicNumber\"] > 10", common.DefaultDynamicFieldName), // int expr with fieldName + fmt.Sprintf("%s[\"dynamicBool\"] == true", common.DefaultDynamicFieldName), // bool with fieldName + "dynamicBool == False", // bool without fieldName + fmt.Sprintf("%s['dynamicString'] == '1'", common.DefaultDynamicFieldName), // string with fieldName + "dynamicString != \"2\" ", // string without fieldName + } + for _, withRows := range []bool{true, false} { // create collection cp := CollectionParams{CollectionFieldsType: Int64FloatVecJSON, AutoID: false, EnableDynamicField: true, @@ -1101,17 +1205,6 @@ func TestSearchDynamicFieldExpr(t *testing.T) { errLoad := mc.LoadCollection(ctx, collName, false) common.CheckErr(t, errLoad, true) - exprs := []string{ - "", - "exists dynamicNumber", // exist without dynamic fieldName - fmt.Sprintf("exists %s[\"dynamicNumber\"]", common.DefaultDynamicFieldName), // exist with fieldName - fmt.Sprintf("%s[\"dynamicNumber\"] > 10", common.DefaultDynamicFieldName), // int expr with fieldName - fmt.Sprintf("%s[\"dynamicBool\"] == true", common.DefaultDynamicFieldName), // bool with fieldName - "dynamicBool == False", // bool without fieldName - fmt.Sprintf("%s['dynamicString'] == '1'", common.DefaultDynamicFieldName), // string with fieldName - "dynamicString != \"2\" ", // string without fieldName - } - // search with jsonField expr key datatype and json data type mismatch sp, _ := entity.NewIndexHNSWSearchParam(74) for _, expr := range exprs { @@ -1706,7 +1799,6 @@ func TestSearchSparseVector(t *testing.T) { // test search with invalid sparse vector func TestSearchInvalidSparseVector(t *testing.T) { - t.Skip("https://github.com/milvus-io/milvus/issues/32368") t.Parallel() idxInverted := entity.NewGenericIndex(common.DefaultSparseVecFieldName, 
"SPARSE_INVERTED_INDEX", map[string]string{"drop_ratio_build": "0.2", "metric_type": "IP"}) idxWand := entity.NewGenericIndex(common.DefaultSparseVecFieldName, "SPARSE_WAND", map[string]string{"drop_ratio_build": "0.3", "metric_type": "IP"}) @@ -1737,10 +1829,9 @@ func TestSearchInvalidSparseVector(t *testing.T) { vector1, err := entity.NewSliceSparseEmbedding([]uint32{}, []float32{}) common.CheckErr(t, err, true) - searchRes, errSearch := mc.Search(ctx, collName, []string{}, "", []string{"*"}, []entity.Vector{vector1}, common.DefaultSparseVecFieldName, + _, errSearch = mc.Search(ctx, collName, []string{}, "", []string{"*"}, []entity.Vector{vector1}, common.DefaultSparseVecFieldName, entity.IP, common.DefaultTopK, sp) - common.CheckErr(t, errSearch, true) - common.CheckSearchResult(t, searchRes, 1, 0) + common.CheckErr(t, errSearch, false, "Sparse row data should not be empty") positions := make([]uint32, 100) values := make([]float32, 100) @@ -1751,7 +1842,7 @@ func TestSearchInvalidSparseVector(t *testing.T) { vector, _ := entity.NewSliceSparseEmbedding(positions, values) _, errSearch1 := mc.Search(ctx, collName, []string{}, "", []string{"*"}, []entity.Vector{vector}, common.DefaultSparseVecFieldName, entity.IP, common.DefaultTopK, sp) - common.CheckErr(t, errSearch1, false, "unsorted or same indices in sparse float vector") + common.CheckErr(t, errSearch1, false, "Invalid sparse row: id should be strict ascending") } } diff --git a/test/testcases/upsert_test.go b/test/testcases/upsert_test.go index e51c368a..b1fa243f 100644 --- a/test/testcases/upsert_test.go +++ b/test/testcases/upsert_test.go @@ -50,8 +50,8 @@ func TestUpsert(t *testing.T) { // delete some pk mc.Delete(ctx, collName, "", "int64 < 10") - resSet, err = mc.Query(ctx, collName, []string{}, "int64 < 10", []string{}) - require.Zero(t, resSet[0].Len()) + resSet1, _ := mc.Query(ctx, collName, []string{}, "int64 < 10", []string{}) + require.Zero(t, resSet1[0].Len()) // upsert part deleted(not exist) pk and part existed pk [5, 15) data = common.GenAllFieldsData(5, upsertNb, common.DefaultDim) @@ -219,12 +219,15 @@ func TestUpsertNotExistCollectionPartition(t *testing.T) { mc := createMilvusClient(ctx, t) // create default collection with autoID true - collName := createDefaultCollection(ctx, t, mc, true, common.DefaultShards) + collName := createDefaultCollection(ctx, t, mc, false, common.DefaultShards) // upsert not exist partition - _, floatColumn, vecColumn := common.GenDefaultColumnData(0, common.DefaultNb, common.DefaultDim) + pkColumn, floatColumn, vecColumn := common.GenDefaultColumnData(0, common.DefaultNb, common.DefaultDim) _, errUpsert := mc.Upsert(ctx, collName, "aaa", floatColumn, vecColumn) - common.CheckErr(t, errUpsert, false, "can not assign primary field data when auto id enabled") + common.CheckErr(t, errUpsert, false, "field int64 not passed") + + _, errUpsert = mc.Upsert(ctx, collName, "aaa", pkColumn, floatColumn, vecColumn) + common.CheckErr(t, errUpsert, false, "partition not found[partition=aaa]") // upsert not exist collection _, errUpsert = mc.Upsert(ctx, "aaa", "", floatColumn, vecColumn) @@ -323,18 +326,18 @@ func TestUpsertDynamicField(t *testing.T) { // verify that dynamic field exists upsertNb := 10 - resSet, err := mc.Query(ctx, collName, []string{}, fmt.Sprintf("%s < %d", common.DefaultDynamicNumberField, upsertNb), + resSet1, _ := mc.Query(ctx, collName, []string{}, fmt.Sprintf("%s < %d", common.DefaultDynamicNumberField, upsertNb), []string{common.DefaultDynamicNumberField}) - 
require.Equal(t, upsertNb, resSet[0].Len()) + require.Equal(t, upsertNb, resSet1[0].Len()) // 1. upsert exist pk without dynamic column intColumn, floatColumn, vecColumn := common.GenDefaultColumnData(0, upsertNb, common.DefaultDim) - _, err = mc.Upsert(ctx, collName, "", intColumn, floatColumn, vecColumn) + _, err := mc.Upsert(ctx, collName, "", intColumn, floatColumn, vecColumn) common.CheckErr(t, err, true) // query and gets empty - resSet, err = mc.Query(ctx, collName, []string{}, fmt.Sprintf("%s < %d", common.DefaultDynamicNumberField, upsertNb), []string{common.DefaultDynamicNumberField}) - require.Zero(t, resSet[0].Len()) + resSet2, _ := mc.Query(ctx, collName, []string{}, fmt.Sprintf("%s < %d", common.DefaultDynamicNumberField, upsertNb), []string{common.DefaultDynamicNumberField}) + require.Zero(t, resSet2[0].Len()) // 2. upsert not exist pk with dynamic column -> field dynamicNumber does not exist in collection intColumn2, floatColumn2, vecColumn2 := common.GenDefaultColumnData(common.DefaultNb, upsertNb, common.DefaultDim) @@ -343,8 +346,8 @@ func TestUpsertDynamicField(t *testing.T) { common.CheckErr(t, err, true) // query and gets empty dynamic field - resSet, err = mc.Query(ctx, collName, []string{}, fmt.Sprintf("%s >= %d", common.DefaultDynamicNumberField, common.DefaultNb), []string{common.QueryCountFieldName}) - require.Equal(t, int64(upsertNb), resSet.GetColumn(common.QueryCountFieldName).(*entity.ColumnInt64).Data()[0]) + resSet3, _ := mc.Query(ctx, collName, []string{}, fmt.Sprintf("%s >= %d", common.DefaultDynamicNumberField, common.DefaultNb), []string{common.QueryCountFieldName}) + require.Equal(t, int64(upsertNb), resSet3.GetColumn(common.QueryCountFieldName).(*entity.ColumnInt64).Data()[0]) } func TestUpsertPartitionKeyCollection(t *testing.T) { @@ -382,16 +385,22 @@ func TestUpsertPartitionKeyCollection(t *testing.T) { // upsert data partition key field [nb, nb*2) partitionKeyColumn2 := common.GenColumnData(common.DefaultNb, common.DefaultNb, entity.FieldTypeInt64, partitionKeyFieldName) - mc.Upsert(ctx, schema.CollectionName, "", pkColumn, floatColumn, vecColumn, partitionKeyColumn2) + _, _, vecColumn2 := common.GenDefaultColumnData(0, common.DefaultNb, common.DefaultDim) + _, err = mc.Upsert(ctx, schema.CollectionName, "", pkColumn, floatColumn, vecColumn2, partitionKeyColumn2) + common.CheckErr(t, err, true) // verify upsert - resSet, err := mc.Query(ctx, schema.CollectionName, []string{}, fmt.Sprintf("%s >= %d", partitionKeyFieldName, common.DefaultNb), + resSet, _ := mc.Query(ctx, schema.CollectionName, []string{}, fmt.Sprintf("%s >= %d", partitionKeyFieldName, common.DefaultNb), []string{common.QueryCountFieldName}) require.Equal(t, int64(common.DefaultNb), resSet.GetColumn(common.QueryCountFieldName).(*entity.ColumnInt64).Data()[0]) - resSet, err = mc.Query(ctx, schema.CollectionName, []string{}, fmt.Sprintf("%s < %d", partitionKeyFieldName, common.DefaultNb), + resSet1, _ := mc.Query(ctx, schema.CollectionName, []string{}, fmt.Sprintf("%s < %d", partitionKeyFieldName, common.DefaultNb), []string{common.QueryCountFieldName}) - require.Equal(t, int64(0), resSet.GetColumn(common.QueryCountFieldName).(*entity.ColumnInt64).Data()[0]) + require.Equal(t, int64(0), resSet1.GetColumn(common.QueryCountFieldName).(*entity.ColumnInt64).Data()[0]) + + resSet2, _ := mc.Query(ctx, schema.CollectionName, []string{}, fmt.Sprintf("%s < %d", common.DefaultIntFieldName, 10), + []string{"*"}) + common.CheckQueryResult(t, resSet2, []entity.Column{pkColumn.Slice(0, 10), 
floatColumn.Slice(0, 10), partitionKeyColumn2.Slice(0, 10), vecColumn2.Slice(0, 10)}) } func TestUpsertWithoutLoading(t *testing.T) { @@ -423,10 +432,11 @@ func TestUpsertWithoutLoading(t *testing.T) { mc.CreateIndex(ctx, collName, common.DefaultFloatVecFieldName, idx, false) mc.LoadCollection(ctx, collName, false) err = mc.CreateIndex(ctx, collName, common.DefaultFloatVecFieldName, idx, false) + common.CheckErr(t, err, true) // query and verify - resSet, err := mc.QueryByPks(ctx, collName, []string{}, intColumn, + resSet, err1 := mc.QueryByPks(ctx, collName, []string{}, intColumn, []string{common.DefaultFloatVecFieldName}) - common.CheckErr(t, err, true) + common.CheckErr(t, err1, true) require.ElementsMatch(t, vecColumn.(*entity.ColumnFloatVector).Data()[:upsertNb], resSet.GetColumn(common.DefaultFloatVecFieldName).(*entity.ColumnFloatVector).Data()) }
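
Most of the hunks above only swap the expected error strings handed to common.CheckErr, which treats its variadic expErrorMsg arguments as alternatives: the assertion passes when the actual error contains any one of the candidates, and after this patch it fails with an explicit message instead of a bare t.FailNow(). Below is a minimal, self-contained Go sketch of that any-of matching logic; checkErrContainsAny is a hypothetical standalone function written for illustration, not the suite's real helper.

package main

import (
	"errors"
	"fmt"
	"strings"
)

// checkErrContainsAny mirrors the CheckErr semantics used in the tests above:
// when an error is expected (expErrNil == false), the check passes if the
// actual error message contains at least one of the candidate substrings.
func checkErrContainsAny(actualErr error, expErrNil bool, expErrorMsg ...string) error {
	if expErrNil {
		if actualErr != nil {
			return fmt.Errorf("expected no error, got: %v", actualErr)
		}
		return nil
	}
	if actualErr == nil {
		return errors.New("expected an error, got nil")
	}
	for _, msg := range expErrorMsg {
		if strings.Contains(actualErr.Error(), msg) {
			return nil // any one match is enough
		}
	}
	return fmt.Errorf("error %q does not contain any expected message %v", actualErr, expErrorMsg)
}

func main() {
	err := errors.New("binary vector index does not support metric type: L2")
	// Passes because the second candidate matches; listing both the old and the
	// new server wording is how the tests stay compatible across Milvus versions.
	if e := checkErrContainsAny(err, false,
		"supported: [HAMMING JACCARD SUBSTRUCTURE SUPERSTRUCTURE]",
		"binary vector index does not support metric type"); e != nil {
		fmt.Println("check failed:", e)
	} else {
		fmt.Println("check passed")
	}
}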