Skip to content

Commit

Permalink
feat: configurable read_pool_size (#76)
Browse files Browse the repository at this point in the history
* configurable `read_pool_size`

* optional env var `HQL_READ_POOL_SIZE`
  • Loading branch information
sebadob authored Nov 26, 2024
1 parent c87a1c6 commit 40668e3
Show file tree
Hide file tree
Showing 4 changed files with 38 additions and 4 deletions.
14 changes: 14 additions & 0 deletions config
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,20 @@ HQL_NODES="
# default: false
HQL_LOG_STATEMENTS=true

# The size of the pooled connections for local database reads.
#
# Do not confuse this with a pool size for network databases, as it is much more efficient.
# You can't really translate between them, because it depends on many factors, but a
# factor of 10 is a good start. This means, if you needed a (read) pool size of 40
# connections for something like a postgres before, you should start at a `read_pool_size` of
# 4.
#
# Keep in mind that this pool is only used for reads; writes will travel through the Raft
# and have their own dedicated connection.
#
# default: 4
HQL_READ_POOL_SIZE=4

# Enables immediate flush + sync to disk after each Log Store Batch.
# The situations where you would need this are very rare, and you
# should use it with care.
Expand Down
18 changes: 18 additions & 0 deletions hiqlite/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,19 @@ pub struct NodeConfig {
/// The internal cache size for prepared statements. The default is `1024` which could be
/// reduced in very heavily memory-constrained environments.
pub prepared_statement_cache_capacity: usize,
/// The size of the pooled connections for local database reads.
///
/// Do not confuse this with a pool size for network databases, as it is much more efficient.
/// You can't really translate between them, because it depends on many factors, but a
/// factor of 10 is a good start. This means, if you needed a (read) pool size of 40
/// connections for something like a postgres before, you should start at a `read_pool_size` of
/// 4.
///
/// Keep in mind that this pool is only used for reads; writes will travel through the Raft
/// and have their own dedicated connection.
///
/// default: 4
pub read_pool_size: usize,
/// Enables immediate flush + sync to disk after each Log Store Batch.
/// The situations where you would need this are very rare, and you should use it with care.
///
Expand Down Expand Up @@ -92,6 +105,7 @@ impl Default for NodeConfig {
filename_db: "hiqlite.db".into(),
log_statements: false,
prepared_statement_cache_capacity: 1024,
read_pool_size: 4,
sync_immediate: false,
raft_config: Self::default_raft_config(10_000),
tls_raft: None,
Expand Down Expand Up @@ -193,6 +207,10 @@ impl NodeConfig {
.parse()
.expect("Cannot parse HQL_LOG_STATEMENTS to u64"),
prepared_statement_cache_capacity: 1024,
read_pool_size: env::var("HQL_READ_POOL_SIZE")
.unwrap_or_else(|_| "4".to_string())
.parse()
.expect("Cannot parse HQL_READ_POOL_SIZE to usize"),
sync_immediate: env::var("HQL_SYNC_IMMEDIATE")
.unwrap_or_else(|_| "false".to_string())
.parse()
Expand Down
1 change: 1 addition & 0 deletions hiqlite/src/store/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ pub(crate) async fn start_raft_db(
node_config.node_id,
node_config.log_statements,
node_config.prepared_statement_cache_capacity,
node_config.read_pool_size,
#[cfg(feature = "s3")]
node_config.s3_config,
)
Expand Down
9 changes: 5 additions & 4 deletions hiqlite/src/store/state_machine/sqlite/state_machine.rs
Original file line number Diff line number Diff line change
Expand Up @@ -122,6 +122,7 @@ impl StateMachineSqlite {
this_node: NodeId,
log_statements: bool,
prepared_statement_cache_capacity: usize,
read_pool_size: usize,
#[cfg(feature = "s3")] s3_config: Option<Arc<crate::s3::S3Config>>,
) -> Result<StateMachineSqlite, StorageError<NodeId>> {
// IMPORTANT: Do NOT change the order of the db exists check!
Expand Down Expand Up @@ -156,6 +157,7 @@ impl StateMachineSqlite {
path_db.as_ref(),
filename_db,
prepared_statement_cache_capacity,
read_pool_size,
)
.await
.map_err(|err| StorageError::IO {
Expand Down Expand Up @@ -302,13 +304,12 @@ impl StateMachineSqlite {
path: &str,
filename_db: &str,
prepared_statement_cache_capacity: usize,
pool_size: usize,
) -> Result<SqlitePool, Error> {
let path_full = format!("{}/{}", path, filename_db);

// TODO configurable read pool size
let amount = 4;
let mut conns = Vec::with_capacity(amount);
for _ in 0..amount {
let mut conns = Vec::with_capacity(pool_size);
for _ in 0..pool_size {
let mut conn = Self::connect(
path.to_string(),
filename_db.to_string(),
Expand Down

0 comments on commit 40668e3

Please sign in to comment.