Skip to content

Commit

Permalink
[UT] Generate the same digest because column names of HashJoin RHS are…
Browse files Browse the repository at this point in the history
… neglected during query cache digest computation (#53755)

Signed-off-by: satanson <[email protected]>
  • Loading branch information
satanson authored Dec 10, 2024
1 parent 00f99f0 commit d76239c
Show file tree
Hide file tree
Showing 2 changed files with 29 additions and 0 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -1333,6 +1333,8 @@ private void normalizeConjunctsNonLeft(FragmentNormalizer normalizer, TNormalPla
@Override
public void normalizeConjuncts(FragmentNormalizer normalizer, TNormalPlanNode planNode, List<Expr> conjuncts) {
if (!normalizer.isProcessingLeftNode()) {
// take column names of HashJoin RHS into cache digest computation
associateSlotIdsWithColumns(normalizer, planNode, Optional.empty());
normalizeConjunctsNonLeft(normalizer, planNode);
return;
}
Expand Down
27 changes: 27 additions & 0 deletions fe/fe-core/src/test/java/com/starrocks/planner/QueryCacheTest.java
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,9 @@
import com.starrocks.common.FeConstants;
import com.starrocks.qe.ConnectContext;
import com.starrocks.sql.plan.ExecPlan;
import com.starrocks.sql.util.Util;
import com.starrocks.statistic.StatsConstants;
import com.starrocks.thrift.TCacheParam;
import com.starrocks.utframe.StarRocksAssert;
import com.starrocks.utframe.UtFrameUtils;
import kotlin.text.Charsets;
Expand All @@ -46,6 +48,7 @@
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static com.starrocks.sql.optimizer.statistics.CachedStatisticStorageTest.DEFAULT_CREATE_TABLE_TEMPLATE;

Expand Down Expand Up @@ -1530,4 +1533,28 @@ public void testGroupByDifferentColumnsOnUnpartitionedTable() {
Assert.assertTrue(frag0.isPresent() && frag1.isPresent());
Assert.assertNotEquals(frag0.get().getCacheParam().digest, frag1.get().getCacheParam().digest);
}

@Test
public void testDigestsVaryAsDifferentColumnNames() {
String sqlFmt = "select %s, count(distinct lo_custkey) \n" +
"from lineorder left outer join[broadcast] \n" +
" part on lo_custkey = p_partkey group by %s";

String[] columnNames = new String[]
{"p_mfgr", "p_color", "p_category", "p_brand", "p_type", "p_container"};

List<Optional<PlanFragment>> planFragments = Stream.of(columnNames)
.map(col -> String.format(sqlFmt, col, col))
.map(this::getCachedFragment)
.collect(Collectors.toList());
Assert.assertTrue(planFragments.stream().allMatch(Optional::isPresent));
Set<String> digests = planFragments.stream().map(optFrag -> optFrag
.map(PlanFragment::getCacheParam)
.map(TCacheParam::getDigest)
.map(Util::toHexString))
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.toSet());
Assert.assertEquals(digests.size(), columnNames.length);
}
}

0 comments on commit d76239c

Please sign in to comment.