Skip to content

Commit

Permalink
[DOP-21813] Update CHANGELOG
Browse files Browse the repository at this point in the history
  • Loading branch information
dolfinus committed Dec 3, 2024
1 parent e9d7b27 commit d3a5a4b
Show file tree
Hide file tree
Showing 6 changed files with 7 additions and 7 deletions.
2 changes: 1 addition & 1 deletion docs/changelog/0.12.5.rst
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
0.12.5 (2024-12-02)
0.12.5 (2024-12-03)
===================

Improvements
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -269,7 +269,7 @@ def test_clickhouse_reader_snapshot_with_partitioning_mode_hash(spark, processin
# 100 rows per 3 partitions -> each partition should contain about ~33 rows,
# with some variance caused by randomness & hash distribution
min_count_per_partition = 10
max_count_per_partition = 50
max_count_per_partition = 55

count_per_partition = table_df.groupBy(spark_partition_id()).count().collect()
for partition in count_per_partition:
Expand Down Expand Up @@ -365,7 +365,7 @@ def test_clickhouse_reader_snapshot_with_partitioning_mode_mod_date(spark, proce
# 100 rows per 3 partitions -> each partition should contain about ~33 rows,
# with some variance caused by randomness & hash distribution
min_count_per_partition = 10
max_count_per_partition = 50
max_count_per_partition = 55

count_per_partition = table_df.groupBy(spark_partition_id()).count().collect()
for partition in count_per_partition:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -275,7 +275,7 @@ def test_mssql_reader_snapshot_with_partitioning_mode_hash(spark, processing, lo
# 100 rows per 3 partitions -> each partition should contain about ~33 rows,
# with some variance caused by randomness & hash distribution
min_count_per_partition = 10
max_count_per_partition = 50
max_count_per_partition = 55

count_per_partition = table_df.groupBy(spark_partition_id()).count().collect()

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -266,7 +266,7 @@ def test_mysql_reader_snapshot_with_partitioning_mode_hash(spark, processing, lo
# 100 rows per 3 partitions -> each partition should contain about ~33 rows,
# with some variance caused by randomness & hash distribution (+- 50% range is wide enough)
min_count_per_partition = 10
max_count_per_partition = 50
max_count_per_partition = 55

count_per_partition = table_df.groupBy(spark_partition_id()).count().collect()
for partition in count_per_partition:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -224,7 +224,7 @@ def test_oracle_reader_snapshot_with_partitioning_mode_hash(spark, processing, l
# 100 rows per 3 partitions -> each partition should contain about ~33 rows,
# with some variance caused by randomness & hash distribution
min_count_per_partition = 10
max_count_per_partition = 50
max_count_per_partition = 55

count_per_partition = table_df.groupBy(spark_partition_id()).count().collect()

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -511,7 +511,7 @@ def test_postgres_reader_snapshot_with_partitioning_mode_hash(spark, processing,
# 100 rows per 3 partitions -> each partition should contain about ~33 rows,
# with some variance caused by randomness & hash distribution
min_count_per_partition = 10
max_count_per_partition = 50
max_count_per_partition = 55

count_per_partition = table_df.groupBy(spark_partition_id()).count().collect()

Expand Down

0 comments on commit d3a5a4b

Please sign in to comment.