     * This is mainly useful for integration tests that run outside a servlet context,
     * yet require a persistence context setup with an external database.
     *
     * @param pmf The {@link JDOPersistenceManagerFactory} to set
     * @throws IllegalStateException When the {@link JDOPersistenceManagerFactory} was already initialized
     * @since 2.1.0
     */
    @SuppressWarnings("unused")
    public static void setJdoPersistenceManagerFactory(final JDOPersistenceManagerFactory pmf) {
        // Only allow injection before the factory has been (lazily) initialized;
        // silently replacing a live factory would leak it and change behavior mid-run.
        if (PersistenceManagerFactory.pmf != null) {
            throw new IllegalStateException("The PersistenceManagerFactory can only be set when it hasn't been initialized yet.");
        }

        PersistenceManagerFactory.pmf = pmf;
    }

    /**
     * Closes the {@link JDOPersistenceManagerFactory} and removes any reference to it.
     *
+ * This method should be called in the {@code tearDown} method of unit- and integration
+ * tests that interact with the persistence layer.
+ *
+ * @since 2.1.0
+ */
+ public static void tearDown() {
+ if (pmf != null) {
+ pmf.close();
+ pmf = null;
+ }
+ }
+
+ private void registerDataNucleusMetrics(final JDOPersistenceManagerFactory pmf) {
+ FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "datastore_reads_total", pmf,
+ p -> p.getNucleusContext().getStatistics().getNumberOfDatastoreReads())
+ .description("Total number of read operations from the datastore")
+ .register(Metrics.getRegistry());
+
+ FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "datastore_writes_total", pmf,
+ p -> p.getNucleusContext().getStatistics().getNumberOfDatastoreWrites())
+ .description("Total number of write operations to the datastore")
+ .register(Metrics.getRegistry());
+
+ FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "object_fetches_total", pmf,
+ p -> p.getNucleusContext().getStatistics().getNumberOfObjectFetches())
+ .description("Total number of objects fetched from the datastore")
+ .register(Metrics.getRegistry());
+
+ FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "object_inserts_total", pmf,
+ p -> p.getNucleusContext().getStatistics().getNumberOfObjectInserts())
+ .description("Total number of objects inserted into the datastore")
+ .register(Metrics.getRegistry());
+
+ FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "object_updates_total", pmf,
+ p -> p.getNucleusContext().getStatistics().getNumberOfObjectUpdates())
+ .description("Total number of objects updated in the datastore")
+ .register(Metrics.getRegistry());
+
+ FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "object_deletes_total", pmf,
+ p -> p.getNucleusContext().getStatistics().getNumberOfObjectDeletes())
+ .description("Total number of objects deleted from the datastore")
+ .register(Metrics.getRegistry());
+
+ Gauge.builder(DATANUCLEUS_METRICS_PREFIX + "query_execution_time_ms_avg", pmf,
+ p -> p.getNucleusContext().getStatistics().getQueryExecutionTimeAverage())
+ .description("Average query execution time in milliseconds")
+ .register(Metrics.getRegistry());
+
+ Gauge.builder(DATANUCLEUS_METRICS_PREFIX + "queries_active", pmf,
+ p -> p.getNucleusContext().getStatistics().getQueryActiveTotalCount())
+ .description("Number of currently active queries")
+ .register(Metrics.getRegistry());
+
+ FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "queries_executed_total", pmf,
+ p -> p.getNucleusContext().getStatistics().getQueryExecutionTotalCount())
+ .description("Total number of executed queries")
+ .register(Metrics.getRegistry());
+
+ FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "queries_failed_total", pmf,
+ p -> p.getNucleusContext().getStatistics().getQueryErrorTotalCount())
+ .description("Total number of queries that completed with an error")
+ .register(Metrics.getRegistry());
+
+ Gauge.builder(DATANUCLEUS_METRICS_PREFIX + "transaction_execution_time_ms_avg", pmf,
+ p -> p.getNucleusContext().getStatistics().getTransactionExecutionTimeAverage())
+ .description("Average transaction execution time in milliseconds")
+ .register(Metrics.getRegistry());
+
+ FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "transactions_active", pmf,
+ p -> p.getNucleusContext().getStatistics().getTransactionActiveTotalCount())
+ .description("Number of currently active transactions")
+ .register(Metrics.getRegistry());
+
+ FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "transactions_total", pmf,
+ p -> p.getNucleusContext().getStatistics().getTransactionTotalCount())
+ .description("Total number of transactions")
+ .register(Metrics.getRegistry());
+
+ FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "transactions_committed_total", pmf,
+ p -> p.getNucleusContext().getStatistics().getTransactionCommittedTotalCount())
+ .description("Total number of committed transactions")
+ .register(Metrics.getRegistry());
+
+ FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "transactions_rolledback_total", pmf,
+ p -> p.getNucleusContext().getStatistics().getTransactionRolledBackTotalCount())
+ .description("Total number of rolled-back transactions")
+ .register(Metrics.getRegistry());
+
+ // This number does not necessarily equate the number of physical connections.
+ // It resembles the number of active connections MANAGED BY DATANUCLEUS.
+ // The number of connections reported by connection pool metrics will differ.
+ Gauge.builder(DATANUCLEUS_METRICS_PREFIX + "connections_active", pmf,
+ p -> p.getNucleusContext().getStatistics().getConnectionActiveCurrent())
+ .description("Number of currently active managed datastore connections")
+ .register(Metrics.getRegistry());
+
+ Gauge.builder(DATANUCLEUS_METRICS_PREFIX + "cache_second_level_entries", pmf,
+ p -> p.getNucleusContext().getLevel2Cache().getSize())
+ .description("Number of entries in the second level cache")
+ .register(Metrics.getRegistry());
+
+ Gauge.builder(DATANUCLEUS_METRICS_PREFIX + "cache_query_generic_compilation_entries", pmf,
+ p -> p.getQueryGenericCompilationCache().size())
+ .description("Number of entries in the generic query compilation cache")
+ .register(Metrics.getRegistry());
+
+ Gauge.builder(DATANUCLEUS_METRICS_PREFIX + "cache_query_datastore_compilation_entries", pmf,
+ p -> p.getQueryDatastoreCompilationCache().size())
+ .description("Number of entries in the datastore query compilation cache")
+ .register(Metrics.getRegistry());
+
+ // Note: The query results cache is disabled per default.
+ Gauge.builder(DATANUCLEUS_METRICS_PREFIX + "cache_query_result_entries", pmf,
+ p -> p.getQueryCache().getQueryCache().size())
+ .description("Number of entries in the query result cache")
+ .register(Metrics.getRegistry());
+ }
+
+ private DataSource createTxPooledDataSource() {
+ final var hikariConfig = createBaseHikariConfig("transactional");
+ hikariConfig.setMaximumPoolSize(getConfigPropertyWithFallback(
+ Config.AlpineKey.DATABASE_POOL_TX_MAX_SIZE,
+ Config.AlpineKey.DATABASE_POOL_MAX_SIZE,
+ Config.getInstance()::getPropertyAsInt
+ ));
+ hikariConfig.setMinimumIdle(getConfigPropertyWithFallback(
+ Config.AlpineKey.DATABASE_POOL_TX_MIN_IDLE,
+ Config.AlpineKey.DATABASE_POOL_MIN_IDLE,
+ Config.getInstance()::getPropertyAsInt
+ ));
+ hikariConfig.setMaxLifetime(getConfigPropertyWithFallback(
+ Config.AlpineKey.DATABASE_POOL_TX_MAX_LIFETIME,
+ Config.AlpineKey.DATABASE_POOL_MAX_LIFETIME,
+ Config.getInstance()::getPropertyAsInt
+ ));
+ hikariConfig.setIdleTimeout(getConfigPropertyWithFallback(
+ Config.AlpineKey.DATABASE_POOL_TX_IDLE_TIMEOUT,
+ Config.AlpineKey.DATABASE_POOL_IDLE_TIMEOUT,
+ Config.getInstance()::getPropertyAsInt
+ ));
+ hikariConfig.setKeepaliveTime(getConfigPropertyWithFallback(
+ Config.AlpineKey.DATABASE_POOL_TX_KEEPALIVE_INTERVAL,
+ Config.AlpineKey.DATABASE_POOL_KEEPALIVE_INTERVAL,
+ Config.getInstance()::getPropertyAsInt
+ ));
+ return new HikariDataSource(hikariConfig);
+ }
+
+ private DataSource createNonTxPooledDataSource() {
+ final var hikariConfig = createBaseHikariConfig("non-transactional");
+ hikariConfig.setMaximumPoolSize(getConfigPropertyWithFallback(
+ Config.AlpineKey.DATABASE_POOL_NONTX_MAX_SIZE,
+ Config.AlpineKey.DATABASE_POOL_MAX_SIZE,
+ Config.getInstance()::getPropertyAsInt
+ ));
+ hikariConfig.setMinimumIdle(getConfigPropertyWithFallback(
+ Config.AlpineKey.DATABASE_POOL_NONTX_MIN_IDLE,
+ Config.AlpineKey.DATABASE_POOL_MIN_IDLE,
+ Config.getInstance()::getPropertyAsInt
+ ));
+ hikariConfig.setMaxLifetime(getConfigPropertyWithFallback(
+ Config.AlpineKey.DATABASE_POOL_NONTX_MAX_LIFETIME,
+ Config.AlpineKey.DATABASE_POOL_MAX_LIFETIME,
+ Config.getInstance()::getPropertyAsInt
+ ));
+ hikariConfig.setIdleTimeout(getConfigPropertyWithFallback(
+ Config.AlpineKey.DATABASE_POOL_NONTX_IDLE_TIMEOUT,
+ Config.AlpineKey.DATABASE_POOL_IDLE_TIMEOUT,
+ Config.getInstance()::getPropertyAsInt
+ ));
+ hikariConfig.setKeepaliveTime(getConfigPropertyWithFallback(
+ Config.AlpineKey.DATABASE_POOL_NONTX_KEEPALIVE_INTERVAL,
+ Config.AlpineKey.DATABASE_POOL_KEEPALIVE_INTERVAL,
+ Config.getInstance()::getPropertyAsInt
+ ));
+ return new HikariDataSource(hikariConfig);
+ }
+
+ private HikariConfig createBaseHikariConfig(final String poolName) {
+ final var hikariConfig = new HikariConfig();
+ hikariConfig.setPoolName(poolName);
+ hikariConfig.setJdbcUrl(Config.getInstance().getProperty(Config.AlpineKey.DATABASE_URL));
+ hikariConfig.setDriverClassName(Config.getInstance().getProperty(Config.AlpineKey.DATABASE_DRIVER));
+ hikariConfig.setUsername(Config.getInstance().getProperty(Config.AlpineKey.DATABASE_USERNAME));
+ hikariConfig.setPassword(Config.getInstance().getProperty(Config.AlpineKey.DATABASE_PASSWORD));
+
+ if (Config.getInstance().getPropertyAsBoolean(Config.AlpineKey.METRICS_ENABLED)) {
+ hikariConfig.setMetricRegistry(Metrics.getRegistry());
+ }
+
+ return hikariConfig;
+ }
+
+ private