diff --git a/datafusion-java/src/main/java/org/apache/arrow/datafusion/AbstractProxy.java b/datafusion-java/src/main/java/org/apache/arrow/datafusion/AbstractProxy.java index 6effda4..d4c7ec7 100644 --- a/datafusion-java/src/main/java/org/apache/arrow/datafusion/AbstractProxy.java +++ b/datafusion-java/src/main/java/org/apache/arrow/datafusion/AbstractProxy.java @@ -47,6 +47,11 @@ public final long getPointer() { abstract void doClose(long pointer) throws Exception; + // Ensure native library is loaded before any proxy object is used + static { + JNILoader.load(); + } + @Override public final void close() throws Exception { if (closed.compareAndSet(false, true)) { diff --git a/datafusion-java/src/main/java/org/apache/arrow/datafusion/ExecutionOptions.java b/datafusion-java/src/main/java/org/apache/arrow/datafusion/ExecutionOptions.java new file mode 100644 index 0000000..934e8dc --- /dev/null +++ b/datafusion-java/src/main/java/org/apache/arrow/datafusion/ExecutionOptions.java @@ -0,0 +1,100 @@ +package org.apache.arrow.datafusion; + +/** Configures options related to query execution */ +@SuppressWarnings("UnusedReturnValue") +public class ExecutionOptions { + private final SessionConfig config; + + ExecutionOptions(SessionConfig config) { + this.config = config; + } + + /** + * Get execution options related to reading Parquet data + * + * @return {@link ParquetOptions} instance for this config + */ + public ParquetOptions parquet() { + return new ParquetOptions(config); + } + + /** + * Get the batch size + * + * @return batch size + */ + public long batchSize() { + return SessionConfig.getExecutionOptionsBatchSize(config.getPointer()); + } + + /** + * Set the size of batches to use when creating new data batches + * + * @param batchSize the batch size to set + * @return the modified {@link ExecutionOptions} instance + */ + public ExecutionOptions withBatchSize(long batchSize) { + SessionConfig.setExecutionOptionsBatchSize(config.getPointer(), batchSize); + return 
this; + } + + /** + * Get whether batch coalescing is enabled + * + * @return whether batch coalescing is enabled + */ + public boolean coalesceBatches() { + return SessionConfig.getExecutionOptionsCoalesceBatches(config.getPointer()); + } + + /** + * Set whether to enable batch coalescing + * + * @param enabled whether to enable batch coalescing + * @return the modified {@link ExecutionOptions} instance + */ + public ExecutionOptions withCoalesceBatches(boolean enabled) { + SessionConfig.setExecutionOptionsCoalesceBatches(config.getPointer(), enabled); + return this; + } + + /** + * Get whether statistics collection is enabled + * + * @return whether statistics collection is enabled + */ + public boolean collectStatistics() { + return SessionConfig.getExecutionOptionsCollectStatistics(config.getPointer()); + } + + /** + * Set whether to enable statistics collection + * + * @param enabled whether to enable statistics collection + * @return the modified {@link ExecutionOptions} instance + */ + public ExecutionOptions withCollectStatistics(boolean enabled) { + SessionConfig.setExecutionOptionsCollectStatistics(config.getPointer(), enabled); + return this; + } + + /** + * Get the target number of partitions + * + * @return number of partitions + */ + public long targetPartitions() { + return SessionConfig.getExecutionOptionsTargetPartitions(config.getPointer()); + } + + /** + * Set the target number of partitions + * + * @param targetPartitions the number of partitions to set + * @return the modified {@link ExecutionOptions} instance + */ + public ExecutionOptions withTargetPartitions(long targetPartitions) { + SessionConfig.setExecutionOptionsTargetPartitions(config.getPointer(), targetPartitions); + return this; + } +} diff --git a/datafusion-java/src/main/java/org/apache/arrow/datafusion/ParquetOptions.java b/datafusion-java/src/main/java/org/apache/arrow/datafusion/ParquetOptions.java new file mode 100644 index 0000000..7f49e73 --- /dev/null +++ 
package org.apache.arrow.datafusion;

import java.util.Optional;

/**
 * Configures options specific to reading Parquet data.
 *
 * <p>Obtained from {@link ExecutionOptions#parquet()}; all accessors delegate to the native
 * session configuration.
 */
@SuppressWarnings("UnusedReturnValue")
public class ParquetOptions {
  private final SessionConfig config;

  ParquetOptions(SessionConfig config) {
    this.config = config;
  }

  /**
   * Get whether parquet data page level metadata (Page Index) statistics are used
   *
   * @return whether using the page index is enabled
   */
  public boolean enablePageIndex() {
    return SessionConfig.getParquetOptionsEnablePageIndex(config.getPointer());
  }

  /**
   * Set whether to use parquet data page level metadata (Page Index) statistics to reduce the
   * number of rows decoded.
   *
   * @param enabled whether using the page index is enabled
   * @return the modified {@link ParquetOptions} instance
   */
  public ParquetOptions withEnablePageIndex(boolean enabled) {
    SessionConfig.setParquetOptionsEnablePageIndex(config.getPointer(), enabled);
    return this;
  }

  /**
   * Get whether pruning is enabled, meaning reading row groups will be skipped based on metadata
   *
   * @return whether pruning is enabled
   */
  public boolean pruning() {
    return SessionConfig.getParquetOptionsPruning(config.getPointer());
  }

  /**
   * Set whether pruning is enabled, meaning reading row groups will be skipped based on metadata
   *
   * @param enabled whether to enable pruning
   * @return the modified {@link ParquetOptions} instance
   */
  public ParquetOptions withPruning(boolean enabled) {
    SessionConfig.setParquetOptionsPruning(config.getPointer(), enabled);
    return this;
  }

  /**
   * Get whether file metadata is skipped, to avoid schema conflicts
   *
   * @return whether metadata is skipped
   */
  public boolean skipMetadata() {
    return SessionConfig.getParquetOptionsSkipMetadata(config.getPointer());
  }

  /**
   * Set whether file metadata is skipped, to avoid schema conflicts
   *
   * @param enabled whether to skip metadata
   * @return the modified {@link ParquetOptions} instance
   */
  public ParquetOptions withSkipMetadata(boolean enabled) {
    SessionConfig.setParquetOptionsSkipMetadata(config.getPointer(), enabled);
    return this;
  }

  /**
   * Get the metadata size hint
   *
   * @return metadata size hint value, or empty if no hint is set
   */
  public Optional<Long> metadataSizeHint() {
    // The native layer encodes "no hint" as a negative value (see setter below).
    long sizeHint = SessionConfig.getParquetOptionsMetadataSizeHint(config.getPointer());
    return sizeHint < 0 ? Optional.empty() : Optional.of(sizeHint);
  }

  /**
   * Set the metadata size hint, which is used to attempt to read the full metadata at once rather
   * than needing one read to get the metadata size and then a second read for the metadata itself.
   *
   * @param metadataSizeHint the metadata size hint, or empty to clear the hint
   * @return the modified {@link ParquetOptions} instance
   * @throws IllegalArgumentException if the provided size hint is negative
   */
  public ParquetOptions withMetadataSizeHint(Optional<Long> metadataSizeHint) {
    // -1 is the sentinel understood by the native side as "no hint".
    long value = -1L;
    if (metadataSizeHint.isPresent()) {
      value = metadataSizeHint.get();
      if (value < 0) {
        throw new IllegalArgumentException("metadataSizeHint cannot be negative");
      }
    }
    SessionConfig.setParquetOptionsMetadataSizeHint(config.getPointer(), value);
    return this;
  }

  /**
   * Get whether filter pushdown is enabled, so filters are applied during parquet decoding
   *
   * @return whether filter pushdown is enabled
   */
  public boolean pushdownFilters() {
    return SessionConfig.getParquetOptionsPushdownFilters(config.getPointer());
  }

  /**
   * Set whether filter pushdown is enabled, so filters are applied during parquet decoding
   *
   * @param enabled whether to pushdown filters
   * @return the modified {@link ParquetOptions} instance
   */
  public ParquetOptions withPushdownFilters(boolean enabled) {
    SessionConfig.setParquetOptionsPushdownFilters(config.getPointer(), enabled);
    return this;
  }

  /**
   * Get whether filter reordering is enabled to minimize evaluation cost
   *
   * @return whether filter reordering is enabled
   */
  public boolean reorderFilters() {
    return SessionConfig.getParquetOptionsReorderFilters(config.getPointer());
  }

  /**
   * Set whether filter reordering is enabled to minimize evaluation cost
   *
   * @param enabled whether to reorder filters
   * @return the modified {@link ParquetOptions} instance
   */
  public ParquetOptions withReorderFilters(boolean enabled) {
    SessionConfig.setParquetOptionsReorderFilters(config.getPointer(), enabled);
    return this;
  }
}
package org.apache.arrow.datafusion;

import java.util.function.Consumer;

/**
 * Configuration for creating a {@link SessionContext} using {@link SessionContexts#withConfig}.
 *
 * <p>Wraps a native DataFusion SessionConfig; the native object is released when this proxy is
 * closed, so instances are best used in a try-with-resources statement.
 */
public class SessionConfig extends AbstractProxy implements AutoCloseable {
  /** Create a new default {@link SessionConfig} */
  public SessionConfig() {
    super(create());
  }

  /**
   * Get options related to query execution
   *
   * @return {@link ExecutionOptions} instance for this config
   */
  public ExecutionOptions executionOptions() {
    return new ExecutionOptions(this);
  }

  /**
   * Get options specific to parsing SQL queries
   *
   * @return {@link SqlParserOptions} instance for this config
   */
  public SqlParserOptions sqlParserOptions() {
    return new SqlParserOptions(this);
  }

  /**
   * Modify this session configuration and then return it, to simplify use in a try-with-resources
   * statement
   *
   * @param configurationCallback Callback used to update the configuration
   * @return This {@link SessionConfig} instance after being updated
   */
  public SessionConfig withConfiguration(Consumer<SessionConfig> configurationCallback) {
    configurationCallback.accept(this);
    return this;
  }

  @Override
  void doClose(long pointer) {
    destroy(pointer);
  }

  private static native long create();

  private static native void destroy(long pointer);

  // Native accessors below; `pointer` is always the handle to the native SessionConfig.

  // ExecutionOptions native methods

  static native long getExecutionOptionsBatchSize(long pointer);

  static native void setExecutionOptionsBatchSize(long pointer, long batchSize);

  static native boolean getExecutionOptionsCoalesceBatches(long pointer);

  static native void setExecutionOptionsCoalesceBatches(long pointer, boolean enabled);

  static native boolean getExecutionOptionsCollectStatistics(long pointer);

  static native void setExecutionOptionsCollectStatistics(long pointer, boolean enabled);

  static native long getExecutionOptionsTargetPartitions(long pointer);

  // Parameter renamed from the copy-pasted `batchSize` to reflect what it actually sets.
  static native void setExecutionOptionsTargetPartitions(long pointer, long targetPartitions);

  // ParquetOptions native methods

  static native boolean getParquetOptionsEnablePageIndex(long pointer);

  static native void setParquetOptionsEnablePageIndex(long pointer, boolean enabled);

  static native boolean getParquetOptionsPruning(long pointer);

  static native void setParquetOptionsPruning(long pointer, boolean enabled);

  static native boolean getParquetOptionsSkipMetadata(long pointer);

  static native void setParquetOptionsSkipMetadata(long pointer, boolean enabled);

  static native long getParquetOptionsMetadataSizeHint(long pointer);

  static native void setParquetOptionsMetadataSizeHint(long pointer, long value);

  static native boolean getParquetOptionsPushdownFilters(long pointer);

  static native void setParquetOptionsPushdownFilters(long pointer, boolean enabled);

  static native boolean getParquetOptionsReorderFilters(long pointer);

  static native void setParquetOptionsReorderFilters(long pointer, boolean enabled);

  // SqlParserOptions native methods

  static native boolean getSqlParserOptionsParseFloatAsDecimal(long pointer);

  static native void setSqlParserOptionsParseFloatAsDecimal(long pointer, boolean enabled);

  static native boolean getSqlParserOptionsEnableIdentNormalization(long pointer);

  static native void setSqlParserOptionsEnableIdentNormalization(long pointer, boolean enabled);

  static native String getSqlParserOptionsDialect(long pointer);

  static native void setSqlParserOptionsDialect(long pointer, String dialect);
}
callback to modify the {@link SessionConfig} for the session + * @return The created context + * @throws Exception if an error is encountered closing the session config resource + */ + public static SessionContext withConfig(Consumer configuration) throws Exception { + try (SessionConfig config = new SessionConfig().withConfiguration(configuration)) { + long pointer = createSessionContextWithConfig(config.getPointer()); + return new DefaultSessionContext(pointer); + } + } } diff --git a/datafusion-java/src/main/java/org/apache/arrow/datafusion/SqlParserOptions.java b/datafusion-java/src/main/java/org/apache/arrow/datafusion/SqlParserOptions.java new file mode 100644 index 0000000..9f607b9 --- /dev/null +++ b/datafusion-java/src/main/java/org/apache/arrow/datafusion/SqlParserOptions.java @@ -0,0 +1,71 @@ +package org.apache.arrow.datafusion; + +/** Configures options specific to parsing SQL queries */ +@SuppressWarnings("UnusedReturnValue") +public class SqlParserOptions { + private final SessionConfig config; + + SqlParserOptions(SessionConfig config) { + this.config = config; + } + + /** + * Get whether to parse floats as decimal type + * + * @return whether to parse floats as decimal + */ + public boolean parseFloatAsDecimal() { + return SessionConfig.getSqlParserOptionsParseFloatAsDecimal(config.getPointer()); + } + + /** + * Set whether to parse floats as decimal type + * + * @param enabled whether to parse floats as decimal + * @return the modified {@link SqlParserOptions} instance + */ + public SqlParserOptions withParseFloatAsDecimal(boolean enabled) { + SessionConfig.setSqlParserOptionsParseFloatAsDecimal(config.getPointer(), enabled); + return this; + } + + /** + * Get whether to convert identifiers to lowercase when not quoted + * + * @return whether ident normalization is enabled + */ + public boolean enableIdentNormalization() { + return SessionConfig.getSqlParserOptionsEnableIdentNormalization(config.getPointer()); + } + + /** + * Set whether to 
convert identifiers to lowercase when not quoted + * + * @param enabled whether ident normalization is enabled + * @return the modified {@link SqlParserOptions} instance + */ + public SqlParserOptions withEnableIdentNormalization(boolean enabled) { + SessionConfig.setSqlParserOptionsEnableIdentNormalization(config.getPointer(), enabled); + return this; + } + + /** + * Get the SQL dialect used + * + * @return the SQL dialect used + */ + public String dialect() { + return SessionConfig.getSqlParserOptionsDialect(config.getPointer()); + } + + /** + * Set the SQL dialect to use + * + * @param dialect the SQL dialect to use + * @return the modified {@link SqlParserOptions} instance + */ + public SqlParserOptions withDialect(String dialect) { + SessionConfig.setSqlParserOptionsDialect(config.getPointer(), dialect); + return this; + } +} diff --git a/datafusion-java/src/test/java/org/apache/arrow/datafusion/TestSessionConfig.java b/datafusion-java/src/test/java/org/apache/arrow/datafusion/TestSessionConfig.java index 55e0daf..bd8c350 100644 --- a/datafusion-java/src/test/java/org/apache/arrow/datafusion/TestSessionConfig.java +++ b/datafusion-java/src/test/java/org/apache/arrow/datafusion/TestSessionConfig.java @@ -3,6 +3,10 @@ import static org.junit.jupiter.api.Assertions.*; import java.nio.file.Path; +import java.util.Optional; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.memory.RootAllocator; +import org.apache.arrow.vector.VectorSchemaRoot; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -28,4 +32,136 @@ public void testRegisterInvalidParquetPath(@TempDir Path tempDir) throws Excepti "Expected an exception to be raised from an IO error"); } } + + @Test + public void testCreateSessionWithConfig() throws Exception { + try (SessionContext context = + SessionContexts.withConfig( + (c) -> c.executionOptions().parquet().withEnablePageIndex(true))) { + // Only testing we can successfully create a session context 
with the config + } + } + + @Test + public void testParquetOptions() throws Exception { + try (SessionConfig config = new SessionConfig()) { + ParquetOptions parquetOptions = config.executionOptions().parquet(); + + assertTrue(parquetOptions.enablePageIndex()); + parquetOptions.withEnablePageIndex(false); + assertFalse(parquetOptions.enablePageIndex()); + + assertTrue(parquetOptions.pruning()); + parquetOptions.withPruning(false); + assertFalse(parquetOptions.pruning()); + + assertTrue(parquetOptions.skipMetadata()); + parquetOptions.withSkipMetadata(false); + assertFalse(parquetOptions.skipMetadata()); + + assertFalse(parquetOptions.metadataSizeHint().isPresent()); + parquetOptions.withMetadataSizeHint(Optional.of(123L)); + Optional sizeHint = parquetOptions.metadataSizeHint(); + assertTrue(sizeHint.isPresent()); + assertEquals(123L, sizeHint.get()); + parquetOptions.withMetadataSizeHint(Optional.empty()); + assertFalse(parquetOptions.metadataSizeHint().isPresent()); + + assertFalse(parquetOptions.pushdownFilters()); + parquetOptions.withPushdownFilters(true); + assertTrue(parquetOptions.pushdownFilters()); + + assertFalse(parquetOptions.reorderFilters()); + parquetOptions.withReorderFilters(true); + assertTrue(parquetOptions.reorderFilters()); + } + } + + @Test + public void testSqlParserOptions() throws Exception { + try (SessionConfig config = new SessionConfig()) { + SqlParserOptions sqlParserOptions = config.sqlParserOptions(); + + assertFalse(sqlParserOptions.parseFloatAsDecimal()); + sqlParserOptions.withParseFloatAsDecimal(true); + assertTrue(sqlParserOptions.parseFloatAsDecimal()); + + assertTrue(sqlParserOptions.enableIdentNormalization()); + sqlParserOptions.withEnableIdentNormalization(false); + assertFalse(sqlParserOptions.enableIdentNormalization()); + + assertEquals("generic", sqlParserOptions.dialect()); + sqlParserOptions.withDialect("PostgreSQL"); + assertEquals("PostgreSQL", sqlParserOptions.dialect()); + } + } + + @Test + public void 
testExecutionOptions() throws Exception { + try (SessionConfig config = new SessionConfig()) { + ExecutionOptions executionOptions = config.executionOptions(); + + assertEquals(8192, executionOptions.batchSize()); + executionOptions.withBatchSize(1024); + assertEquals(1024, executionOptions.batchSize()); + + assertTrue(executionOptions.coalesceBatches()); + executionOptions.withCoalesceBatches(false); + assertFalse(executionOptions.coalesceBatches()); + + assertFalse(executionOptions.collectStatistics()); + executionOptions.withCollectStatistics(true); + assertTrue(executionOptions.collectStatistics()); + + long targetPartitions = executionOptions.targetPartitions(); + assertTrue(targetPartitions > 0); + executionOptions.withTargetPartitions(targetPartitions * 2); + assertEquals(targetPartitions * 2, executionOptions.targetPartitions()); + } + } + + @Test + public void testBatchSize(@TempDir Path tempDir) throws Exception { + long rowCount = 1024; + long batchSize = 64; + try (SessionContext context = + SessionContexts.withConfig((conf) -> conf.executionOptions().withBatchSize(batchSize)); + BufferAllocator allocator = new RootAllocator()) { + Path parquetFilePath = tempDir.resolve("data.parquet"); + + String parquetSchema = + "{\"namespace\": \"org.example\"," + + "\"type\": \"record\"," + + "\"name\": \"record_name\"," + + "\"fields\": [" + + " {\"name\": \"x\", \"type\": \"long\"}" + + " ]}"; + + ParquetWriter.writeParquet( + parquetFilePath, + parquetSchema, + 1024, + (i, record) -> { + record.put("x", i); + }); + + context.registerParquet("test", parquetFilePath).join(); + + try (RecordBatchStream stream = + context + .sql("SELECT * FROM test") + .thenComposeAsync(df -> df.executeStream(allocator)) + .join()) { + VectorSchemaRoot root = stream.getVectorSchemaRoot(); + + long rowsReceived = 0; + while (stream.loadNextBatch().join()) { + assertTrue(root.getRowCount() <= batchSize); + rowsReceived += root.getRowCount(); + } + + assertEquals(rowCount, 
rowsReceived); + } + } + } } diff --git a/datafusion-jni/src/context.rs b/datafusion-jni/src/context.rs index cb4838c..9eafa05 100644 --- a/datafusion-jni/src/context.rs +++ b/datafusion-jni/src/context.rs @@ -1,6 +1,6 @@ use datafusion::datasource::TableProvider; use datafusion::execution::context::SessionContext; -use datafusion::prelude::{CsvReadOptions, ParquetReadOptions}; +use datafusion::prelude::{CsvReadOptions, ParquetReadOptions, SessionConfig}; use jni::objects::{JClass, JObject, JString}; use jni::sys::jlong; use jni::JNIEnv; @@ -134,3 +134,14 @@ pub extern "system" fn Java_org_apache_arrow_datafusion_SessionContexts_createSe let context = SessionContext::new(); Box::into_raw(Box::new(context)) as jlong } + +#[no_mangle] +pub extern "system" fn Java_org_apache_arrow_datafusion_SessionContexts_createSessionContextWithConfig( + _env: JNIEnv, + _class: JClass, + config: jlong, +) -> jlong { + let config = unsafe { &*(config as *const SessionConfig) }; + let context = SessionContext::with_config(config.clone()); + Box::into_raw(Box::new(context)) as jlong +} diff --git a/datafusion-jni/src/lib.rs b/datafusion-jni/src/lib.rs index 5b80be7..9c5568e 100644 --- a/datafusion-jni/src/lib.rs +++ b/datafusion-jni/src/lib.rs @@ -5,6 +5,7 @@ mod listing_options; mod listing_table; mod listing_table_config; mod runtime; +mod session_config; mod stream; mod table_provider; mod util; diff --git a/datafusion-jni/src/session_config.rs b/datafusion-jni/src/session_config.rs new file mode 100644 index 0000000..9ce18aa --- /dev/null +++ b/datafusion-jni/src/session_config.rs @@ -0,0 +1,251 @@ +use datafusion::execution::context::SessionConfig; +use jni::objects::{JClass, JString}; +use jni::sys::{jboolean, jlong}; +use jni::JNIEnv; + +#[no_mangle] +pub extern "system" fn Java_org_apache_arrow_datafusion_SessionConfig_create( + _env: JNIEnv, + _class: JClass, +) -> jlong { + let session_config = Box::new(SessionConfig::new()); + Box::into_raw(session_config) as jlong +} + 
+#[no_mangle] +pub extern "system" fn Java_org_apache_arrow_datafusion_SessionConfig_destroy( + _env: JNIEnv, + _class: JClass, + pointer: jlong, +) { + let _ = unsafe { Box::from_raw(pointer as *mut SessionConfig) }; +} + +// Helper macros to implement boolean options + +macro_rules! bool_getter { + ($name:ident, $($property_path:ident).+) => { + #[no_mangle] + pub extern "system" fn $name( + _env: JNIEnv, + _class: JClass, + pointer: jlong, + ) -> jboolean { + let config = unsafe { &*(pointer as *const SessionConfig) }; + let property_value = config.options().$($property_path).+; + if property_value { + 1u8 + } else { + 0u8 + } + } + } +} + +macro_rules! bool_setter { + ($name:ident, $($property_path:ident).+) => { + #[no_mangle] + pub extern "system" fn $name( + _env: JNIEnv, + _class: JClass, + pointer: jlong, + enabled: jboolean, + ) { + let config = unsafe { &mut *(pointer as *mut SessionConfig) }; + config.options_mut().$($property_path).+ = enabled != 0u8; + } + } +} + +macro_rules! usize_getter { + ($name:ident, $($property_path:ident).+) => { + #[no_mangle] + pub extern "system" fn $name( + _env: JNIEnv, + _class: JClass, + pointer: jlong, + ) -> jlong { + let config = unsafe { &*(pointer as *const SessionConfig) }; + let property_value = config.options().$($property_path).+; + property_value as jlong + } + } +} + +macro_rules! 
usize_setter { + ($name:ident, $($property_path:ident).+) => { + #[no_mangle] + pub extern "system" fn $name( + _env: JNIEnv, + _class: JClass, + pointer: jlong, + value: jlong, + ) { + let config = unsafe { &mut *(pointer as *mut SessionConfig) }; + config.options_mut().$($property_path).+ = value as usize; + } + } +} + +// ExecutionOptions + +usize_getter!( + Java_org_apache_arrow_datafusion_SessionConfig_getExecutionOptionsBatchSize, + execution.batch_size +); +usize_setter!( + Java_org_apache_arrow_datafusion_SessionConfig_setExecutionOptionsBatchSize, + execution.batch_size +); + +bool_getter!( + Java_org_apache_arrow_datafusion_SessionConfig_getExecutionOptionsCoalesceBatches, + execution.coalesce_batches +); +bool_setter!( + Java_org_apache_arrow_datafusion_SessionConfig_setExecutionOptionsCoalesceBatches, + execution.coalesce_batches +); + +bool_getter!( + Java_org_apache_arrow_datafusion_SessionConfig_getExecutionOptionsCollectStatistics, + execution.collect_statistics +); +bool_setter!( + Java_org_apache_arrow_datafusion_SessionConfig_setExecutionOptionsCollectStatistics, + execution.collect_statistics +); + +usize_getter!( + Java_org_apache_arrow_datafusion_SessionConfig_getExecutionOptionsTargetPartitions, + execution.target_partitions +); +usize_setter!( + Java_org_apache_arrow_datafusion_SessionConfig_setExecutionOptionsTargetPartitions, + execution.target_partitions +); + +// ParquetOptions + +bool_getter!( + Java_org_apache_arrow_datafusion_SessionConfig_getParquetOptionsEnablePageIndex, + execution.parquet.enable_page_index +); +bool_setter!( + Java_org_apache_arrow_datafusion_SessionConfig_setParquetOptionsEnablePageIndex, + execution.parquet.enable_page_index +); + +bool_getter!( + Java_org_apache_arrow_datafusion_SessionConfig_getParquetOptionsPruning, + execution.parquet.pruning +); +bool_setter!( + Java_org_apache_arrow_datafusion_SessionConfig_setParquetOptionsPruning, + execution.parquet.pruning +); + +bool_getter!( + 
Java_org_apache_arrow_datafusion_SessionConfig_getParquetOptionsSkipMetadata, + execution.parquet.skip_metadata +); +bool_setter!( + Java_org_apache_arrow_datafusion_SessionConfig_setParquetOptionsSkipMetadata, + execution.parquet.skip_metadata +); + +bool_getter!( + Java_org_apache_arrow_datafusion_SessionConfig_getParquetOptionsPushdownFilters, + execution.parquet.pushdown_filters +); +bool_setter!( + Java_org_apache_arrow_datafusion_SessionConfig_setParquetOptionsPushdownFilters, + execution.parquet.pushdown_filters +); + +bool_getter!( + Java_org_apache_arrow_datafusion_SessionConfig_getParquetOptionsReorderFilters, + execution.parquet.reorder_filters +); +bool_setter!( + Java_org_apache_arrow_datafusion_SessionConfig_setParquetOptionsReorderFilters, + execution.parquet.reorder_filters +); + +#[no_mangle] +pub extern "system" fn Java_org_apache_arrow_datafusion_SessionConfig_getParquetOptionsMetadataSizeHint( + _env: JNIEnv, + _class: JClass, + pointer: jlong, +) -> jlong { + let config = unsafe { &*(pointer as *const SessionConfig) }; + let size_hint = config.options().execution.parquet.metadata_size_hint; + match size_hint { + Some(size_hint) => size_hint as jlong, + None => -1 as jlong, + } +} + +#[no_mangle] +pub extern "system" fn Java_org_apache_arrow_datafusion_SessionConfig_setParquetOptionsMetadataSizeHint( + _env: JNIEnv, + _class: JClass, + pointer: jlong, + value: jlong, +) { + let config = unsafe { &mut *(pointer as *mut SessionConfig) }; + if value >= 0 { + config.options_mut().execution.parquet.metadata_size_hint = Some(value as usize); + } else { + config.options_mut().execution.parquet.metadata_size_hint = None; + } +} + +// SqlParserOptions + +bool_getter!( + Java_org_apache_arrow_datafusion_SessionConfig_getSqlParserOptionsParseFloatAsDecimal, + sql_parser.parse_float_as_decimal +); +bool_setter!( + Java_org_apache_arrow_datafusion_SessionConfig_setSqlParserOptionsParseFloatAsDecimal, + sql_parser.parse_float_as_decimal +); + +bool_getter!( + 
Java_org_apache_arrow_datafusion_SessionConfig_getSqlParserOptionsEnableIdentNormalization, + sql_parser.enable_ident_normalization +); +bool_setter!( + Java_org_apache_arrow_datafusion_SessionConfig_setSqlParserOptionsEnableIdentNormalization, + sql_parser.enable_ident_normalization +); + +#[no_mangle] +pub extern "system" fn Java_org_apache_arrow_datafusion_SessionConfig_getSqlParserOptionsDialect< + 'local, +>( + env: JNIEnv<'local>, + _class: JClass<'local>, + pointer: jlong, +) -> JString<'local> { + let config = unsafe { &*(pointer as *const SessionConfig) }; + let dialect = &config.options().sql_parser.dialect; + env.new_string(dialect) + .expect("Couldn't create Java string") +} + +#[no_mangle] +pub extern "system" fn Java_org_apache_arrow_datafusion_SessionConfig_setSqlParserOptionsDialect( + mut env: JNIEnv, + _class: JClass, + pointer: jlong, + dialect: JString, +) { + let config = unsafe { &mut *(pointer as *mut SessionConfig) }; + let dialect: String = env + .get_string(&dialect) + .expect("Couldn't get dialect string") + .into(); + config.options_mut().sql_parser.dialect = dialect; +}