diff --git a/_config.yml b/_config.yml
index 03ca8342..d90998a0 100644
--- a/_config.yml
+++ b/_config.yml
@@ -195,6 +195,19 @@ defaults:
toc: true
toc_sticky: true
search: true
+ # 3.12
+ - scope:
+ path: "docs/3.12" # Specifies the name of the folder where this version of the docs is located.
+ # type: "" # Since this scope uses `collection_dir`, we do not need to specify the type here.
+ values:
+ layout: page # Specifies the type of template used from the "_layouts" folder.
+ read_time: false # Shows the average reading time for pages.
+ share: false # Shows social media buttons to share pages.
+ sidebar: # Shows side navigation content from `_data/navigation.yml`.
+ nav: "latest" # Add the version enclosed within quotation marks. If the docs in the navigation are for the latest version of the product, be sure to set `nav:` to `"latest"`. If the docs in the navigation are for a previous version of the product, be sure to set `nav:` to the product version number (e.g., `"3.8"`). That version number must match the set of docs for that product version in `_data/navigation.yml`.
+ toc: true
+ toc_sticky: true
+ search: false
# 3.11
- scope:
path: "docs/3.11" # Specifies the name of the folder where this version of docs are located.
@@ -204,7 +217,7 @@ defaults:
read_time: false # Shows the average reading time for pages.
share: false # Shows social media buttons to share pages.
sidebar: # Shows side navigation content from `_data/navigation.yml`.
- nav: "latest" # Add the version enclosed within quotation marks. If the docs in the navigation is for the latest version of the product, be sure to set `nav:` to `"latest"`. If the docs in the navigation is for a previous version of the product, be sure to set `nav:` to the product version number (e.g., `"3.8"`). That version number must match the set of docs for that product version in `_data/navigation.yml`.
+ nav: "3.11" # Add the version enclosed within quotation marks. If the docs in the navigation are for the latest version of the product, be sure to set `nav:` to `"latest"`. If the docs in the navigation are for a previous version of the product, be sure to set `nav:` to the product version number (e.g., `"3.8"`). That version number must match the set of docs for that product version in `_data/navigation.yml`.
toc: true
toc_sticky: true
search: false
diff --git a/_data/navigation.yml b/_data/navigation.yml
index b371445e..b8b440d4 100644
--- a/_data/navigation.yml
+++ b/_data/navigation.yml
@@ -37,8 +37,10 @@ editions:
versions:
- version-top-title: "" # This title is taken from the `version_label` in `_data/ui-text.yml`.
version-children:
- - version-title: "3.11 (latest)"
+ - version-title: "3.12 (latest)"
version-url: /docs/latest/getting-started-with-scalardb/
+ - version-title: "3.11"
+ version-url: /docs/3.11/getting-started-with-scalardb/
- version-title: "3.10"
version-url: /docs/3.10/getting-started-with-scalardb/
- version-title: "3.9"
@@ -62,9 +64,9 @@ versions:
# After that, add a new scope in `_config.yml` to include an item that provides the proper versioned navigation to the site when someone visits the page (i.e., make sure people who visit a version 3.8 doc are shown a side navigation that includes only 3.8 docs.)
"latest":
- - title: "⬅ ScalarDB Enterprise docs home"
+ - title: "⬅ ScalarDB Enterprise docs home"
url: /docs/ # Don't change this URL. This links back to the parent product home page.
- - title: "ScalarDB 3.11 Enterprise"
+ - title: "ScalarDB 3.12 Enterprise"
children:
# Get Started docs
- title: "Get Started"
@@ -88,9 +90,9 @@ versions:
- title: "Getting Started with the ScalarDB Cluster .NET Client SDK"
url: /docs/latest/scalardb-cluster-dotnet-client-sdk/
# Samples docs
- - title: "Samples"
+ - title: "Samples"
children:
- - title: "ScalarDB Samples"
+ - title: "ScalarDB Samples"
url: /docs/latest/scalardb-samples/README/
# - title: "ScalarDB Samples"
# url: /docs/latest/scalardb-samples/scalardb-sample/README/
@@ -171,7 +173,7 @@ versions:
# - title: "Migrate"
# children:
# - title: ""
- # url:
+ # url:
# Reference docs
- title: "Reference"
children:
@@ -211,8 +213,158 @@ versions:
- title: "Release Support Policy"
url: /docs/releases/release-support-policy/
+"3.11":
+ - title: "⬅ ScalarDB Enterprise docs home"
+ url: /docs/ # Don't change this URL. This links back to the parent product home page.
+ - title: "ScalarDB 3.11 Enterprise"
+ children:
+ # Get Started docs
+ - title: "Get Started"
+ children:
+ # - title: "Getting Started with ScalarDB on Cassandra"
+ # url: /docs/3.11/getting-started-with-scalardb-on-cassandra/
+ # - title: "Getting Started with ScalarDB on Cosmos DB"
+ # url: /docs/3.11/getting-started-with-scalardb-on-cosmosdb/
+ # - title: "Getting Started with ScalarDB on DynamoDB"
+ # url: /docs/3.11/getting-started-with-scalardb-on-dynamodb/
+ # - title: "Getting Started with ScalarDB on JDBC databases"
+ # url: /docs/3.11/getting-started-with-scalardb-on-jdbc/
+ - title: "Getting Started with ScalarDB"
+ url: /docs/3.11/getting-started-with-scalardb/
+ - title: "Getting Started with ScalarDB by Using Kotlin"
+ url: /docs/3.11/getting-started-with-scalardb-by-using-kotlin/
+ - title: "Getting Started with ScalarDB Analytics with PostgreSQL"
+ url: /docs/3.11/scalardb-analytics-postgresql/getting-started/
+ - title: "Getting Started with ScalarDB Cluster"
+ url: /docs/3.11/scalardb-cluster/getting-started-with-scalardb-cluster-overview/
+ - title: "Getting Started with the ScalarDB Cluster .NET Client SDK"
+ url: /docs/3.11/scalardb-cluster-dotnet-client-sdk/
+ # Samples docs
+ - title: "Samples"
+ children:
+ - title: "ScalarDB Samples"
+ url: /docs/3.11/scalardb-samples/README/
+ # - title: "ScalarDB Samples"
+ # url: /docs/3.11/scalardb-samples/scalardb-sample/README/
+ # - title: "Microservice Transaction Sample"
+ # url: /docs/3.11/scalardb-samples/microservice-transaction-sample/README/
+ # - title: "Multi-Storage Transaction Sample"
+ # url: /docs/3.11/scalardb-samples/multi-storage-transaction-sample/README/
+ # - title: "ScalarDB GraphQL Sample"
+ # url: /docs/3.11/scalardb-samples/scalardb-graphql-sample/README/
+ # - title: "Spring Data Multi-Storage Transaction Sample"
+ # url: /docs/3.11/scalardb-samples/spring-data-multi-storage-transaction-sample/README/
+ # - title: "Spring Data Sample"
+ # url: /docs/3.11/scalardb-samples/spring-data-sample/README/
+ # Develop docs
+ - title: "Develop"
+ children:
+ - title: "Add ScalarDB to Your Build"
+ url: /docs/3.11/add-scalardb-to-your-build/
+ - title: "Add ScalarDB SQL to Your Build"
+ url: /docs/3.11/scalardb-sql/add-scalardb-sql-to-your-build/
+ - title: "Developer Guides for ScalarDB"
+ url: /docs/3.11/guides/
+ - title: "Configuration Guides for ScalarDB"
+ url: /docs/3.11/development-configurations/
+ - title: "ScalarDB Schema Loader"
+ url: /docs/3.11/schema-loader/
+ - title: "Importing Existing Tables to ScalarDB by Using ScalarDB Schema Loader"
+ url: /docs/3.11/schema-loader-import/
+ - title: "ScalarDB FDW for ScalarDB Analytics with PostgreSQL"
+ url: /docs/3.11/scalardb-analytics-postgresql/scalardb-fdw/
+ - title: "Schema Importer for ScalarDB Analytics with PostgreSQL"
+ url: /docs/3.11/scalardb-analytics-postgresql/schema-importer/
+ # - title: "Export Function for ScalarDB Data Loader" # May be added in the near future.
+ # url: /docs/3.11/scalardb-data-loader/getting-started-export/
+ # - title: "Import Function for ScalarDB Data Loader" # May be added in the near future.
+ # url: /docs/3.11/scalardb-data-loader/getting-started-import/
+ - title: "ScalarDB Auth with ScalarDB SQL"
+ url: /docs/3.11/scalardb-cluster/scalardb-auth-with-sql/
+ - title: "How to Run ScalarDB GraphQL Server"
+ url: /docs/3.11/scalardb-graphql/how-to-run-server/
+ - title: "How to Run Two-Phase Commit Transaction by Using ScalarDB GraphQL"
+ url: /docs/3.11/scalardb-graphql/how-to-run-two-phase-commit-transaction/
+ - title: "ScalarDB SQL Command Line Interface"
+ url: /docs/3.11/scalardb-sql/command-line-interface/
+ # Deploy docs
+ - title: "Deploy"
+ children:
+ - title: "Deploying ScalarDB on Managed Kubernetes Services"
+ url: /docs/3.11/scalar-kubernetes/deploy-kubernetes/
+ - title: "Getting Started with Scalar Helm Charts"
+ url: /docs/3.11/helm-charts/getting-started-scalar-helm-charts/
+ - title: "Set Up ScalarDB Cluster on Kubernetes by Using a Helm Chart"
+ url: /docs/3.11/scalardb-cluster/setup-scalardb-cluster-on-kubernetes-by-using-helm-chart/
+ - title: "Configure a Custom Values File for Scalar Helm Charts"
+ url: /docs/3.11/helm-charts/configure-custom-values-file/
+ - title: "Deploy Scalar Products Using Scalar Helm Charts"
+ url: /docs/3.11/helm-charts/how-to-deploy-scalar-products/
+ - title: "Mount Any Files or Volumes on Scalar Product Pods"
+ url: /docs/3.11/helm-charts/mount-files-or-volumes-on-scalar-pods/
+ - title: "How to Use Secret Resources to Pass Credentials as Environment Variables into the Properties File"
+ url: /docs/3.11/helm-charts/use-secret-for-credentials/
+ - title: "How to Install ScalarDB Analytics with PostgreSQL in Your Local Environment by Using Docker"
+ url: /docs/3.11/scalardb-analytics-postgresql/installation/
+ - title: "ScalarDB Cluster Standalone Mode"
+ url: /docs/3.11/scalardb-cluster/standalone-mode/
+ - title: "ScalarDB GraphQL Deployment Guide on AWS"
+ url: /docs/3.11/scalardb-graphql/aws-deployment-guide/
+ - title: "ScalarDB SQL Server"
+ url: /docs/3.11/scalardb-sql/sql-server/
+ # Manage docs
+ - title: "Manage"
+ children:
+ - title: "How to Back Up and Restore Databases Used Through ScalarDB"
+ url: /docs/3.11/backup-restore/
+ - title: "Managing ScalarDB on Managed Kubernetes Services"
+ url: /docs/3.11/scalar-kubernetes/manage-kubernetes/
+ # Migrate docs
+ # - title: "Migrate"
+ # children:
+ # - title: ""
+ # url:
+ # Reference docs
+ - title: "Reference"
+ children:
+ - title: "ScalarDB Design Document"
+ url: /docs/3.11/design/
+ - title: "ScalarDB Supported Databases"
+ url: /docs/3.11/scalardb-supported-databases/
+ - title: "Requirements and Recommendations for the Underlying Databases of ScalarDB"
+ url: /docs/3.11/requirements/
+ - title: "Storage Abstraction and API Guide"
+ url: /docs/3.11/storage-abstraction/
+ - title: "ScalarDB Benchmarks"
+ url: /docs/3.11/scalardb-benchmarks/README/
+ - title: "ScalarDB Cluster"
+ url: /docs/3.11/scalardb-cluster/
+ - title: "ScalarDB SQL Grammar"
+ url: /docs/3.11/scalardb-sql/grammar/
+ # Release docs and notes
+ - title: "Releases"
+ children:
+ - title: "Release Notes"
+ url: /docs/releases/
+ # - title: "v3.10"
+ # url: /docs/releases/release-3.10/
+ # - title: "v3.9"
+ # url: /docs/releases/release-3.9/
+ # - title: "v3.8"
+ # url: /docs/releases/release-3.8/
+ # - title: "v3.7"
+ # url: /docs/releases/release-3.7/
+ # - title: "v3.6"
+ # url: /docs/releases/release-3.6/
+ # - title: "v3.5"
+ # url: /docs/releases/release-3.5/
+ # - title: "v3.4"
+ # url: /docs/releases/release-3.4/
+ - title: "Release Support Policy"
+ url: /docs/releases/release-support-policy/
+
"3.10":
- - title: "⬅ ScalarDB Enterprise docs home"
+ - title: "⬅ ScalarDB Enterprise docs home"
url: /docs/ # Don't change this URL. This links back to the parent product home page.
- title: "ScalarDB 3.10 Enterprise"
children:
@@ -236,9 +388,9 @@ versions:
- title: "Getting Started with ScalarDB Cluster"
url: /docs/3.10/scalardb-cluster/getting-started-with-scalardb-cluster-overview/
# Samples docs
- - title: "Samples"
+ - title: "Samples"
children:
- - title: "ScalarDB Samples"
+ - title: "ScalarDB Samples"
url: /docs/3.10/scalardb-samples/README/
# - title: "ScalarDB Samples"
# url: /docs/3.10/scalardb-samples/scalardb-sample/README/
@@ -354,7 +506,7 @@ versions:
url: /docs/releases/release-support-policy/
"3.9":
- - title: "⬅ ScalarDB Enterprise docs home"
+ - title: "⬅ ScalarDB Enterprise docs home"
url: /docs/ # Don't change this URL. This links back to the parent product home page.
- title: "ScalarDB 3.9 Enterprise"
children:
@@ -378,9 +530,9 @@ versions:
- title: "Getting Started with ScalarDB Cluster"
url: /docs/3.9/scalardb-cluster/getting-started-with-scalardb-cluster-overview/
# Samples docs
- - title: "Samples"
+ - title: "Samples"
children:
- - title: "ScalarDB Samples"
+ - title: "ScalarDB Samples"
url: /docs/3.9/scalardb-samples/README/
# - title: "ScalarDB Samples"
# url: /docs/3.9/scalardb-samples/scalardb-sample/README/
@@ -496,7 +648,7 @@ versions:
url: /docs/releases/release-support-policy/
"3.8":
- - title: "⬅ ScalarDB Enterprise docs home"
+ - title: "⬅ ScalarDB Enterprise docs home"
url: /docs/ # Don't change this URL. This links back to the parent product home page.
- title: "ScalarDB 3.8 Enterprise"
children:
@@ -522,9 +674,9 @@ versions:
- title: "Getting Started with ScalarDB SQL"
url: /docs/3.8/scalardb-sql/getting-started-with-sql/
# Samples docs
- - title: "Samples"
+ - title: "Samples"
children:
- - title: "ScalarDB Samples"
+ - title: "ScalarDB Samples"
url: /docs/3.8/scalardb-samples/README/
# - title: "ScalarDB Samples"
# url: /docs/3.8/scalardb-samples/scalardb-sample/README/
@@ -638,7 +790,7 @@ versions:
url: /docs/releases/release-support-policy/
"3.7":
- - title: "⬅ ScalarDB Enterprise docs home"
+ - title: "⬅ ScalarDB Enterprise docs home"
url: /docs/ # Don't change this URL. This links back to the parent product home page.
- title: "ScalarDB 3.7 Enterprise"
children:
@@ -664,9 +816,9 @@ versions:
- title: "Getting Started with ScalarDB SQL"
url: /docs/3.7/scalardb-sql/getting-started-with-sql/
# Samples docs
- - title: "Samples"
+ - title: "Samples"
children:
- - title: "ScalarDB Samples"
+ - title: "ScalarDB Samples"
url: /docs/3.7/scalardb-samples/README/
# - title: "ScalarDB Samples"
# url: /docs/3.7/scalardb-samples/scalardb-sample/README/
@@ -778,7 +930,7 @@ versions:
url: /docs/releases/release-support-policy/
"3.6":
- - title: "⬅ ScalarDB Enterprise docs home"
+ - title: "⬅ ScalarDB Enterprise docs home"
url: /docs/ # Don't change this URL. This links back to the parent product home page.
- title: "ScalarDB 3.6 Enterprise"
children:
@@ -804,9 +956,9 @@ versions:
- title: "Getting Started with ScalarDB SQL"
url: /docs/3.6/scalardb-sql/getting-started-with-sql/
# Samples docs
- - title: "Samples"
+ - title: "Samples"
children:
- - title: "ScalarDB Samples"
+ - title: "ScalarDB Samples"
url: /docs/3.6/scalardb-samples/README/
# - title: "ScalarDB Samples"
# url: /docs/3.6/scalardb-samples/scalardb-sample/README/
@@ -918,7 +1070,7 @@ versions:
url: /docs/releases/release-support-policy/
"3.5":
- - title: "⬅ ScalarDB Enterprise docs home"
+ - title: "⬅ ScalarDB Enterprise docs home"
url: /docs/ # Don't change this URL. This links back to the parent product home page.
- title: "ScalarDB 3.5 Enterprise"
children:
@@ -940,9 +1092,9 @@ versions:
- title: "Getting Started with ScalarDB GraphQL"
url: /docs/3.5/scalardb-graphql/getting-started-with-scalardb-graphql/
# Samples docs
- - title: "Samples"
+ - title: "Samples"
children:
- - title: "ScalarDB Samples"
+ - title: "ScalarDB Samples"
url: /docs/3.5/scalardb-samples/README/
# - title: "ScalarDB Samples"
# url: /docs/3.5/scalardb-samples/scalardb-sample/README/
@@ -1048,7 +1200,7 @@ versions:
url: /docs/releases/release-support-policy/
"3.4":
- - title: "⬅ ScalarDB Enterprise docs home"
+ - title: "⬅ ScalarDB Enterprise docs home"
url: /docs/ # Don't change this URL. This links back to the parent product home page.
- title: "ScalarDB 3.4 Enterprise"
children:
@@ -1068,9 +1220,9 @@ versions:
- title: "Getting Started with ScalarDB"
url: /docs/3.4/getting-started-with-scalardb/
# Samples docs
- - title: "Samples"
+ - title: "Samples"
children:
- - title: "ScalarDB Samples"
+ - title: "ScalarDB Samples"
url: /docs/3.4/scalardb-samples/README/
# - title: "ScalarDB Samples"
# url: /docs/3.4/scalardb-samples/scalardb-sample/README/
diff --git a/docs/3.12/add-scalardb-to-your-build.md b/docs/3.12/add-scalardb-to-your-build.md
new file mode 100644
index 00000000..85d8e7fa
--- /dev/null
+++ b/docs/3.12/add-scalardb-to-your-build.md
@@ -0,0 +1,37 @@
+# Add ScalarDB to Your Build
+
+The ScalarDB library is available on the [Maven Central Repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb). You can add the library as a build dependency to your application by using Gradle or Maven.
+
+## Configure your application based on your build tool
+
+Select your build tool, and follow the instructions to add the build dependency for ScalarDB to your application.
+
+To add the build dependency for ScalarDB by using Gradle, add the following to `build.gradle` in your application, replacing `<VERSION>` with the version of ScalarDB that you want to use:
+
+```gradle
+dependencies {
+    implementation 'com.scalar-labs:scalardb:<VERSION>'
+}
+```
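+
+For example, assuming that `3.12.0` is the release you want (the version number here is illustrative only), the dependency would look like this:
+
+```gradle
+dependencies {
+    // Illustrative version number; replace it with the release you actually need.
+    implementation 'com.scalar-labs:scalardb:3.12.0'
+}
+```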
+
+To add the build dependency for ScalarDB by using Maven, add the following to `pom.xml` in your application, replacing `<VERSION>` with the version of ScalarDB that you want to use:
+
+```xml
+<dependency>
+  <groupId>com.scalar-labs</groupId>
+  <artifactId>scalardb</artifactId>
+  <version><VERSION></version>
+</dependency>
+```
+
diff --git a/docs/3.12/api-guide.md b/docs/3.12/api-guide.md
new file mode 100644
index 00000000..2e0cb99b
--- /dev/null
+++ b/docs/3.12/api-guide.md
@@ -0,0 +1,1176 @@
+# ScalarDB Java API Guide
+
+The ScalarDB Java API is mainly composed of the Administrative API and Transactional API. This guide briefly explains what kinds of APIs exist, how to use them, and related topics like how to handle exceptions.
+
+## Administrative API
+
+This section explains how to execute administrative operations programmatically by using the Administrative API in ScalarDB.
+
+{% capture notice--info %}
+**Note**
+
+Another method for executing administrative operations is to use [Schema Loader](schema-loader.md).
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+### Get a `DistributedTransactionAdmin` instance
+
+You first need to get a `DistributedTransactionAdmin` instance to execute administrative operations.
+
+To get a `DistributedTransactionAdmin` instance, you can use `TransactionFactory` as follows:
+
+```java
+TransactionFactory transactionFactory = TransactionFactory.create("<CONFIGURATION_FILE_PATH>");
+DistributedTransactionAdmin admin = transactionFactory.getTransactionAdmin();
+```
+
+For details about configurations, see [ScalarDB Configurations](configurations.md).
+
+After you have executed all administrative operations, you should close the `DistributedTransactionAdmin` instance as follows:
+
+```java
+admin.close();
+```
+
+### Create a namespace
+
+You must create a namespace before creating tables because a table belongs to one namespace.
+
+You can create a namespace as follows:
+
+```java
+// Create the namespace "ns". If the namespace already exists, an exception will be thrown.
+admin.createNamespace("ns");
+
+// Create the namespace only if it does not already exist.
+boolean ifNotExists = true;
+admin.createNamespace("ns", ifNotExists);
+
+// Create the namespace with options.
+Map<String, String> options = ...;
+admin.createNamespace("ns", options);
+```
+
+#### Creation options
+
+In the creation operations, like creating a namespace and creating a table, you can specify options that are maps of option names and values (`Map<String, String>`). By using the options, you can set storage adapter–specific configurations.
+
+Select your database to see the options available:
+
+**Cassandra**
+
+| Name | Description | Default |
+|----------------------|----------------------------------------------------------------------------------------|------------------|
+| replication-strategy | Cassandra replication strategy. Must be `SimpleStrategy` or `NetworkTopologyStrategy`. | `SimpleStrategy` |
+| compaction-strategy | Cassandra compaction strategy. Must be `LCS`, `STCS`, or `TWCS`. | `STCS` |
+| replication-factor | Cassandra replication factor. | 1 |
+
+
+**Cosmos DB for NoSQL**
+
+| Name | Description | Default |
+|------------|-----------------------------------------------------|---------|
+| ru | Base resource unit. | 400 |
+| no-scaling | Disable auto-scaling for Cosmos DB for NoSQL. | false |
+
+**DynamoDB**
+
+| Name | Description | Default |
+|------------|-----------------------------------------|---------|
+| no-scaling | Disable auto-scaling for DynamoDB. | false |
+| no-backup | Disable continuous backup for DynamoDB. | false |
+| ru | Base resource unit. | 10 |
+
+**JDBC databases**
+
+No options are available for JDBC databases.
+
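+As a minimal sketch of how these options are passed (assuming Cassandra as the underlying storage and using Guava's `ImmutableMap`, which this guide also references later), you could create a namespace with a replication factor of 3 as follows:
+
+```java
+// Pass Cassandra-specific options, taken from the table above, when creating a namespace.
+Map<String, String> options =
+    ImmutableMap.of(
+        "replication-strategy", "SimpleStrategy",
+        "replication-factor", "3");
+admin.createNamespace("ns", options);
+```
+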
+### Create a table
+
+When creating a table, you should define the table metadata and then create the table.
+
+To define the table metadata, you can use `TableMetadata`. The following shows how to define the columns, partition key, clustering key including clustering orders, and secondary indexes of a table:
+
+```java
+// Define the table metadata.
+TableMetadata tableMetadata =
+ TableMetadata.newBuilder()
+ .addColumn("c1", DataType.INT)
+ .addColumn("c2", DataType.TEXT)
+ .addColumn("c3", DataType.BIGINT)
+ .addColumn("c4", DataType.FLOAT)
+ .addColumn("c5", DataType.DOUBLE)
+ .addPartitionKey("c1")
+ .addClusteringKey("c2", Scan.Ordering.Order.DESC)
+ .addClusteringKey("c3", Scan.Ordering.Order.ASC)
+ .addSecondaryIndex("c4")
+ .build();
+```
+
+For details about the data model of ScalarDB, see [Data Model](design.md#data-model).
+
+Then, create a table as follows:
+
+```java
+// Create the table "ns.tbl". If the table already exists, an exception will be thrown.
+admin.createTable("ns", "tbl", tableMetadata);
+
+// Create the table only if it does not already exist.
+boolean ifNotExists = true;
+admin.createTable("ns", "tbl", tableMetadata, ifNotExists);
+
+// Create the table with options.
+Map<String, String> options = ...;
+admin.createTable("ns", "tbl", tableMetadata, options);
+```
+
+### Create a secondary index
+
+You can create a secondary index as follows:
+
+```java
+// Create a secondary index on column "c5" for table "ns.tbl". If a secondary index already exists, an exception will be thrown.
+admin.createIndex("ns", "tbl", "c5");
+
+// Create the secondary index only if it does not already exist.
+boolean ifNotExists = true;
+admin.createIndex("ns", "tbl", "c5", ifNotExists);
+
+// Create the secondary index with options.
+Map<String, String> options = ...;
+admin.createIndex("ns", "tbl", "c5", options);
+```
+
+### Add a new column to a table
+
+You can add a new, non-partition key column to a table as follows:
+
+```java
+// Add a new column "c6" with the INT data type to the table "ns.tbl".
+admin.addNewColumnToTable("ns", "tbl", "c6", DataType.INT);
+```
+
+{% capture notice--warning %}
+**Attention**
+
+You should carefully consider adding a new column to a table because the execution time may vary greatly depending on the underlying storage. Please plan accordingly and consider the following, especially if the database runs in production:
+
+- **For Cosmos DB for NoSQL and DynamoDB:** Adding a column is almost instantaneous as the table schema is not modified. Only the table metadata stored in a separate table is updated.
+- **For Cassandra:** Adding a column will only update the schema metadata and will not modify the existing schema records. The cluster topology is the main factor for the execution time. Changes to the schema metadata are shared to each cluster node via a gossip protocol. Because of this, the larger the cluster, the longer it will take for all nodes to be updated.
+- **For relational databases (MySQL, Oracle, etc.):** Adding a column shouldn't take a long time to execute.
+{% endcapture %}
+
+
+<div class="notice--warning">{{ notice--warning | markdownify }}</div>
+
+### Truncate a table
+
+You can truncate a table as follows:
+
+```java
+// Truncate the table "ns.tbl".
+admin.truncateTable("ns", "tbl");
+```
+
+### Drop a secondary index
+
+You can drop a secondary index as follows:
+
+```java
+// Drop the secondary index on column "c5" from table "ns.tbl". If the secondary index does not exist, an exception will be thrown.
+admin.dropIndex("ns", "tbl", "c5");
+
+// Drop the secondary index only if it exists.
+boolean ifExists = true;
+admin.dropIndex("ns", "tbl", "c5", ifExists);
+```
+
+### Drop a table
+
+You can drop a table as follows:
+
+```java
+// Drop the table "ns.tbl". If the table does not exist, an exception will be thrown.
+admin.dropTable("ns", "tbl");
+
+// Drop the table only if it exists.
+boolean ifExists = true;
+admin.dropTable("ns", "tbl", ifExists);
+```
+
+### Drop a namespace
+
+You can drop a namespace as follows:
+
+```java
+// Drop the namespace "ns". If the namespace does not exist, an exception will be thrown.
+admin.dropNamespace("ns");
+
+// Drop the namespace only if it exists.
+boolean ifExists = true;
+admin.dropNamespace("ns", ifExists);
+```
+
+### Get the tables of a namespace
+
+You can get the tables of a namespace as follows:
+
+```java
+// Get the tables of the namespace "ns".
+Set<String> tables = admin.getNamespaceTableNames("ns");
+```
+
+### Get table metadata
+
+You can get table metadata as follows:
+
+```java
+// Get the table metadata for "ns.tbl".
+TableMetadata tableMetadata = admin.getTableMetadata("ns", "tbl");
+```
+
+### Repair a table
+
+You can repair the table metadata of an existing table as follows:
+
+```java
+// Repair the table "ns.tbl" with options.
+TableMetadata tableMetadata =
+ TableMetadata.newBuilder()
+ ...
+ .build();
+Map<String, String> options = ...;
+admin.repairTable("ns", "tbl", tableMetadata, options);
+```
+
+### Specify operations for the Coordinator table
+
+The Coordinator table is used by the [Transactional API](#transactional-api) to track the statuses of transactions.
+
+When using a transaction manager, you must create the Coordinator table to execute transactions. In addition to creating the table, you can truncate and drop the Coordinator table.
+
+#### Create the Coordinator table
+
+You can create the Coordinator table as follows:
+
+```java
+// Create the Coordinator table.
+admin.createCoordinatorTables();
+
+// Create the Coordinator table only if one does not already exist.
+boolean ifNotExist = true;
+admin.createCoordinatorTables(ifNotExist);
+
+// Create the Coordinator table with options.
+Map<String, String> options = ...;
+admin.createCoordinatorTables(options);
+```
+
+#### Truncate the Coordinator table
+
+You can truncate the Coordinator table as follows:
+
+```java
+// Truncate the Coordinator table.
+admin.truncateCoordinatorTables();
+```
+
+#### Drop the Coordinator table
+
+You can drop the Coordinator table as follows:
+
+```java
+// Drop the Coordinator table.
+admin.dropCoordinatorTables();
+
+// Drop the Coordinator table if it exists.
+boolean ifExist = true;
+admin.dropCoordinatorTables(ifExist);
+```
+
+### Import a table
+
+You can import an existing table to ScalarDB as follows:
+
+```java
+// Import the table "ns.tbl". If the table is already managed by ScalarDB, the target table does not
+// exist, or the table does not meet the requirements of the ScalarDB table, an exception will be thrown.
+Map<String, String> options = ...;
+admin.importTable("ns", "tbl", options);
+```
+
+{% capture notice--warning %}
+**Attention**
+
+You should carefully plan to import a table to ScalarDB in production because it will add transaction metadata columns to your database tables and the ScalarDB metadata tables. In this case, there would also be several differences between your database and ScalarDB, as well as some limitations. For details, see [Importing Existing Tables to ScalarDB by Using ScalarDB Schema Loader](./schema-loader-import.md).
+
+{% endcapture %}
+
+
+<div class="notice--warning">{{ notice--warning | markdownify }}</div>
+
+## Transactional API
+
+This section explains how to execute transactional operations by using the Transactional API in ScalarDB.
+
+### Get a `DistributedTransactionManager` instance
+
+You first need to get a `DistributedTransactionManager` instance to execute transactional operations.
+
+To get a `DistributedTransactionManager` instance, you can use `TransactionFactory` as follows:
+
+```java
+TransactionFactory transactionFactory = TransactionFactory.create("<CONFIGURATION_FILE_PATH>");
+DistributedTransactionManager transactionManager = transactionFactory.getTransactionManager();
+```
+
+After you have executed all transactional operations, you should close the `DistributedTransactionManager` instance as follows:
+
+```java
+transactionManager.close();
+```
+
+### Begin or start a transaction
+
+Before executing transactional CRUD operations, you need to begin or start a transaction.
+
+You can begin a transaction as follows:
+
+```java
+// Begin a transaction.
+DistributedTransaction transaction = transactionManager.begin();
+```
+
+Or, you can start a transaction as follows:
+
+```java
+// Start a transaction.
+DistributedTransaction transaction = transactionManager.start();
+```
+
+Alternatively, you can use the `begin` method for a transaction by specifying a transaction ID as follows:
+
+```java
+// Begin a transaction by specifying a transaction ID.
+DistributedTransaction transaction = transactionManager.begin("<TRANSACTION_ID>");
+```
+
+Or, you can use the `start` method for a transaction by specifying a transaction ID as follows:
+
+```java
+// Start a transaction by specifying a transaction ID.
+DistributedTransaction transaction = transactionManager.start("<TRANSACTION_ID>");
+```
+
+{% capture notice--info %}
+**Note**
+
+Specifying a transaction ID is useful when you want to link external systems to ScalarDB. Otherwise, you should use the `begin()` method or the `start()` method.
+
+When you specify a transaction ID, make sure you specify a unique ID (for example, UUID v4) throughout the system since ScalarDB depends on the uniqueness of transaction IDs for correctness.
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
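+
+For example, a sketch that generates a unique ID with `java.util.UUID` (one way to meet the uniqueness requirement, not the only one) might look like this:
+
+```java
+// Use a UUID v4 as the transaction ID so that IDs are unique across the system.
+String transactionId = UUID.randomUUID().toString();
+DistributedTransaction transaction = transactionManager.begin(transactionId);
+```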
+
+### Join a transaction
+
+Joining a transaction is particularly useful in a stateful application where a transaction spans multiple client requests. In such a scenario, the application can start a transaction during the first client request. Then, in subsequent client requests, the application can join the ongoing transaction by using the `join()` method.
+
+You can join an ongoing transaction that has already begun by specifying the transaction ID as follows:
+
+```java
+// Join a transaction.
+DistributedTransaction transaction = transactionManager.join("<TRANSACTION_ID>");
+```
+
+{% capture notice--info %}
+**Note**
+
+You can get the transaction ID by calling `getId()` as follows:
+
+```java
+tx.getId();
+```
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+### Resume a transaction
+
+Resuming a transaction is particularly useful in a stateful application where a transaction spans multiple client requests. In such a scenario, the application can start a transaction during the first client request. Then, in subsequent client requests, the application can resume the ongoing transaction by using the `resume()` method.
+
+You can resume an ongoing transaction that you have already begun by specifying a transaction ID as follows:
+
+```java
+// Resume a transaction.
+DistributedTransaction transaction = transactionManager.resume("<TRANSACTION_ID>");
+```
+
+{% capture notice--info %}
+**Note**
+
+You can get the transaction ID by calling `getId()` as follows:
+
+```java
+tx.getId();
+```
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+### Implement CRUD operations
+
+The following sections describe key construction and CRUD operations.
+
+{% capture notice--info %}
+**Note**
+
+Although all the builders of the CRUD operations can specify consistency by using the `consistency()` methods, those methods are ignored. Instead, the `LINEARIZABLE` consistency level is always used in transactions.
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+#### Key construction
+
+Most CRUD operations need to specify `Key` objects (partition-key, clustering-key, etc.). So, before moving on to CRUD operations, the following explains how to construct a `Key` object.
+
+For a single column key, you can use `Key.of()` methods to construct the key as follows:
+
+```java
+// For a key that consists of a single column of INT.
+Key key1 = Key.ofInt("col1", 1);
+
+// For a key that consists of a single column of BIGINT.
+Key key2 = Key.ofBigInt("col1", 100L);
+
+// For a key that consists of a single column of DOUBLE.
+Key key3 = Key.ofDouble("col1", 1.3d);
+
+// For a key that consists of a single column of TEXT.
+Key key4 = Key.ofText("col1", "value");
+```
+
+For a key that consists of two to five columns, you can use the `Key.of()` method to construct the key as follows. Similar to `ImmutableMap.of()` in Guava, you need to specify column names and values in turn:
+
+```java
+// For a key that consists of two to five columns.
+Key key1 = Key.of("col1", 1, "col2", 100L);
+Key key2 = Key.of("col1", 1, "col2", 100L, "col3", 1.3d);
+Key key3 = Key.of("col1", 1, "col2", 100L, "col3", 1.3d, "col4", "value");
+Key key4 = Key.of("col1", 1, "col2", 100L, "col3", 1.3d, "col4", "value", "col5", false);
+```
+
+For a key that consists of more than five columns, you can use the builder to construct the key as follows:
+
+```java
+// For a key that consists of more than five columns.
+Key key = Key.newBuilder()
+ .addInt("col1", 1)
+ .addBigInt("col2", 100L)
+ .addDouble("col3", 1.3d)
+ .addText("col4", "value")
+ .addBoolean("col5", false)
+ .addInt("col6", 100)
+ .build();
+```
+
+#### `Get` operation
+
+`Get` is an operation to retrieve a single record specified by a primary key.
+
+You need to create a `Get` object first, and then you can execute the object by using the `transaction.get()` method as follows:
+
+```java
+// Create a `Get` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key clusteringKey = Key.of("c2", "aaa", "c3", 100L);
+
+Get get =
+ Get.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .projections("c1", "c2", "c3", "c4")
+ .build();
+
+// Execute the `Get` operation.
+Optional<Result> result = transaction.get(get);
+```
+
+You can also specify projections to choose which columns are returned.
+
+##### Handle `Result` objects
+
+The `Get` operation and `Scan` operation return `Result` objects. The following shows how to handle `Result` objects.
+
+You can get a column value of a result by using the `get<TYPE>("<COLUMN_NAME>")` methods as follows:
+
+```java
+// Get the BOOLEAN value of a column.
+boolean booleanValue = result.getBoolean("<COLUMN_NAME>");
+
+// Get the INT value of a column.
+int intValue = result.getInt("<COLUMN_NAME>");
+
+// Get the BIGINT value of a column.
+long bigIntValue = result.getBigInt("<COLUMN_NAME>");
+
+// Get the FLOAT value of a column.
+float floatValue = result.getFloat("<COLUMN_NAME>");
+
+// Get the DOUBLE value of a column.
+double doubleValue = result.getDouble("<COLUMN_NAME>");
+
+// Get the TEXT value of a column.
+String textValue = result.getText("<COLUMN_NAME>");
+
+// Get the BLOB value of a column as a `ByteBuffer`.
+ByteBuffer blobValue = result.getBlob("<COLUMN_NAME>");
+
+// Get the BLOB value of a column as a `byte` array.
+byte[] blobValueAsBytes = result.getBlobAsBytes("<COLUMN_NAME>");
+```
+
+And if you need to check whether the value of a column is null, you can use the `isNull("<COLUMN_NAME>")` method.
+
+```java
+// Check if the value of a column is null.
+boolean isNull = result.isNull("<COLUMN_NAME>");
+```
+
+For more details, see the `Result` page in the [Javadoc](https://javadoc.io/doc/com.scalar-labs/scalardb/latest/index.html) of the version of ScalarDB that you're using.
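+
+For instance, a minimal sketch that reads a couple of columns from the `Get` result above (assuming the `ns.tbl` schema defined earlier) might look like this:
+
+```java
+// Read column values from the result of the `Get` operation, if a record was found.
+Optional<Result> result = transaction.get(get);
+result.ifPresent(
+    r -> {
+      int c1 = r.getInt("c1");
+      // Check for null before reading a nullable column.
+      Float c4 = r.isNull("c4") ? null : r.getFloat("c4");
+      System.out.println("c1=" + c1 + ", c4=" + c4);
+    });
+```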
+
+##### Execute `Get` by using a secondary index
+
+You can execute a `Get` operation by using a secondary index.
+
+Instead of specifying a partition key, you can specify an index key (indexed column) to use a secondary index as follows:
+
+```java
+// Create a `Get` operation by using a secondary index.
+Key indexKey = Key.ofFloat("c4", 1.23F);
+
+Get get =
+ Get.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .indexKey(indexKey)
+ .projections("c1", "c2", "c3", "c4")
+ .build();
+
+// Execute the `Get` operation.
+Optional<Result> result = transaction.get(get);
+```
+
+{% capture notice--info %}
+**Note**
+
+If the result has more than one record, `transaction.get()` will throw an exception. If you want to handle multiple results, see [Execute `Scan` by using a secondary index](#execute-scan-by-using-a-secondary-index).
+
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+#### `Scan` operation
+
+`Scan` is an operation to retrieve multiple records within a partition. You can specify clustering-key boundaries and orderings for clustering-key columns in `Scan` operations.
+
+You need to create a `Scan` object first, and then you can execute the object by using the `transaction.scan()` method as follows:
+
+```java
+// Create a `Scan` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key startClusteringKey = Key.of("c2", "aaa", "c3", 100L);
+Key endClusteringKey = Key.of("c2", "aaa", "c3", 300L);
+
+Scan scan =
+ Scan.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .start(startClusteringKey, true) // Include startClusteringKey
+ .end(endClusteringKey, false) // Exclude endClusteringKey
+ .projections("c1", "c2", "c3", "c4")
+ .orderings(Scan.Ordering.desc("c2"), Scan.Ordering.asc("c3"))
+ .limit(10)
+ .build();
+
+// Execute the `Scan` operation.
+List<Result> results = transaction.scan(scan);
+```
+
+You can omit the clustering-key boundaries or specify either a `start` boundary or an `end` boundary. If you don't specify `orderings`, you will get results ordered by the clustering order that you defined when creating the table.
+
+In addition, you can specify `projections` to choose which columns are returned and use `limit` to specify the number of records to return in `Scan` operations.
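+
+For example, a sketch of a `Scan` that specifies only a `start` boundary and relies on the clustering order defined at table creation time might look like this:
+
+```java
+// Scan from the start boundary (inclusive by default) to the end of the partition.
+Scan scan =
+    Scan.newBuilder()
+        .namespace("ns")
+        .table("tbl")
+        .partitionKey(partitionKey)
+        .start(startClusteringKey) // No end boundary and no explicit orderings.
+        .build();
+```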
+
+##### Execute `Scan` by using a secondary index
+
+You can execute a `Scan` operation by using a secondary index.
+
+Instead of specifying a partition key, you can specify an index key (indexed column) to use a secondary index as follows:
+
+```java
+// Create a `Scan` operation by using a secondary index.
+Key indexKey = Key.ofFloat("c4", 1.23F);
+
+Scan scan =
+ Scan.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .indexKey(indexKey)
+ .projections("c1", "c2", "c3", "c4")
+ .limit(10)
+ .build();
+
+// Execute the `Scan` operation.
+List<Result> results = transaction.scan(scan);
+```
+
+{% capture notice--info %}
+**Note**
+
+You can't specify clustering-key boundaries and orderings in `Scan` by using a secondary index.
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+##### Execute cross-partition `Scan` without specifying a partition key to retrieve all the records of a table
+
+You can execute a `Scan` operation across all partitions, which we call *cross-partition scan*, without specifying a partition key by enabling the following configuration in the ScalarDB properties file.
+
+```properties
+scalar.db.cross_partition_scan.enabled=true
+```
+
+{% capture notice--warning %}
+**Attention**
+
+For non-JDBC databases, we do not recommend enabling cross-partition scan with the `SERIALIZABLE` isolation level because transactions could be executed at a lower isolation level (that is, `SNAPSHOT`). When using non-JDBC databases, use cross-partition scan at your own risk only if consistency does not matter for your transactions.
+{% endcapture %}
+
+
+<div class="notice--warning">{{ notice--warning | markdownify }}</div>
+
+Instead of calling the `partitionKey()` method in the builder, you can call the `all()` method to scan a table without specifying a partition key as follows:
+
+```java
+// Create a `Scan` operation without specifying a partition key.
+Scan scan =
+ Scan.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .all()
+ .projections("c1", "c2", "c3", "c4")
+ .limit(10)
+ .build();
+
+// Execute the `Scan` operation.
+List<Result> results = transaction.scan(scan);
+```
+
+{% capture notice--info %}
+**Note**
+
+You can't specify any filtering conditions and orderings in cross-partition `Scan` except for when using JDBC databases. For details on how to use cross-partition `Scan` with filtering or ordering for JDBC databases, see [Execute cross-partition `Scan` with filtering and ordering](#execute-cross-partition-scan-with-filtering-and-ordering).
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+##### Execute cross-partition `Scan` with filtering and ordering
+
+By enabling the cross-partition scan option with filtering and ordering for JDBC databases as follows, you can execute a cross-partition `Scan` operation with flexible conditions and orderings:
+
+```properties
+scalar.db.cross_partition_scan.enabled=true
+scalar.db.cross_partition_scan.filtering.enabled=true
+scalar.db.cross_partition_scan.ordering.enabled=true
+```
+
+You can call the `where()` and `ordering()` methods after calling the `all()` method to specify arbitrary conditions and orderings as follows:
+
+```java
+// Create a `Scan` operation with arbitrary conditions and orderings.
+Scan scan =
+ Scan.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .all()
+ .where(ConditionBuilder.column("c1").isNotEqualToInt(10))
+ .projections("c1", "c2", "c3", "c4")
+ .orderings(Scan.Ordering.desc("c3"), Scan.Ordering.asc("c4"))
+ .limit(10)
+ .build();
+
+// Execute the `Scan` operation.
+List<Result> results = transaction.scan(scan);
+```
+
+As an argument of the `where()` method, you can specify a condition, an and-wise condition set, or an or-wise condition set. After calling the `where()` method, you can add more conditions or condition sets by using the `and()` method or `or()` method as follows:
+
+```java
+// Create a `Scan` operation with condition sets.
+Scan scan =
+ Scan.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .all()
+ .where(
+ ConditionSetBuilder.condition(ConditionBuilder.column("c1").isLessThanInt(10))
+ .or(ConditionBuilder.column("c1").isGreaterThanInt(20))
+ .build())
+ .and(
+ ConditionSetBuilder.condition(ConditionBuilder.column("c2").isLikeText("a%"))
+ .or(ConditionBuilder.column("c2").isLikeText("b%"))
+ .build())
+ .limit(10)
+ .build();
+```
+
+{% capture notice--info %}
+**Note**
+
+In the `where()` condition method chain, the conditions must be an and-wise junction of `ConditionalExpression` or `OrConditionSet` (known as conjunctive normal form) like the above example or an or-wise junction of `ConditionalExpression` or `AndConditionSet` (known as disjunctive normal form).
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+For more details about available conditions and condition sets, see the `ConditionBuilder` and `ConditionSetBuilder` page in the [Javadoc](https://javadoc.io/doc/com.scalar-labs/scalardb/latest/index.html) of the version of ScalarDB that you're using.
+
+#### `Put` operation
+
+`Put` is an operation to put a record specified by a primary key. The operation behaves as an upsert operation for a record, in which the operation updates the record if the record exists or inserts the record if the record does not exist.
+
+{% capture notice--info %}
+**Note**
+
+When you update an existing record, you need to read the record by using `Get` or `Scan` before using a `Put` operation. Otherwise, the operation will fail due to a conflict. This occurs because of the specification of ScalarDB to manage transactions properly. Instead of reading the record explicitly, you can enable implicit pre-read. For details, see [Enable implicit pre-read for `Put` operations](#enable-implicit-pre-read-for-put-operations).
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+You need to create a `Put` object first, and then you can execute the object by using the `transaction.put()` method as follows:
+
+```java
+// Create a `Put` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key clusteringKey = Key.of("c2", "aaa", "c3", 100L);
+
+Put put =
+ Put.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .build();
+
+// Execute the `Put` operation.
+transaction.put(put);
+```
+
+You can also put a record with `null` values as follows:
+
+```java
+Put put =
+ Put.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .floatValue("c4", null)
+ .doubleValue("c5", null)
+ .build();
+```
+
+##### Enable implicit pre-read for `Put` operations
+
+In Consensus Commit, an application must read a record before mutating the record with `Put` and `Delete` operations to obtain the latest state of the record if the record exists. Instead of reading the record explicitly, you can enable *implicit pre-read*. By enabling implicit pre-read, if an application does not read the record explicitly in a transaction, ScalarDB will read the record on behalf of the application before committing the transaction.
+
+You can enable implicit pre-read for a `Put` operation by specifying `enableImplicitPreRead()` in the `Put` operation builder as follows:
+
+```java
+Put put =
+ Put.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .enableImplicitPreRead()
+ .build();
+```
+
+{% capture notice--info %}
+**Note**
+
+If you are certain that a record you are trying to mutate does not exist, you should not enable implicit pre-read for the `Put` operation for better performance. For example, if you load initial data, you should not enable implicit pre-read. A `Put` operation without implicit pre-read is faster than a `Put` operation with implicit pre-read because the operation skips an unnecessary read.
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+#### `Delete` operation
+
+`Delete` is an operation to delete a record specified by a primary key.
+
+{% capture notice--info %}
+**Note**
+
+When you delete a record, you don't have to read the record beforehand because implicit pre-read is always enabled for `Delete` operations.
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+You need to create a `Delete` object first, and then you can execute the object by using the `transaction.delete()` method as follows:
+
+```java
+// Create a `Delete` operation.
+Key partitionKey = Key.ofInt("c1", 10);
+Key clusteringKey = Key.of("c2", "aaa", "c3", 100L);
+
+Delete delete =
+ Delete.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .build();
+
+// Execute the `Delete` operation.
+transaction.delete(delete);
+```
+
+#### `Put` and `Delete` with a condition
+
+You can require a transaction to meet arbitrary conditions before it is committed (for example, that a bank account balance is equal to or greater than zero) by implementing logic that checks the conditions in the transaction, as sketched below. Alternatively, you can write simple conditions in a mutation operation, such as `Put` and `Delete`.
+
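+As a sketch of the first approach, checking a condition in transaction logic before committing (the `balance` column and `amount` value here are hypothetical, for illustration only), you might write:
+
+```java
+// Read the current balance, verify the invariant in application logic,
+// and roll back the transaction if the invariant would be violated.
+Optional<Result> result = transaction.get(get);
+long balance = result.map(r -> r.getBigInt("balance")).orElse(0L);
+long amount = 100L; // Hypothetical amount to withdraw.
+if (balance - amount < 0) {
+  transaction.rollback();
+  throw new IllegalStateException("The balance must not be negative");
+}
+// Otherwise, update the balance with a `Put` operation and commit the transaction.
+```
+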
+When a `Put` or `Delete` operation includes a condition, the operation is executed only if the specified condition is met. If the condition is not met when the operation is executed, an exception called `UnsatisfiedConditionException` will be thrown.
+
+{% capture notice--info %}
+**Note**
+
+When you specify a condition in a `Put` operation, you need to read the record beforehand or enable implicit pre-read.
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+##### Conditions for `Put`
+
+You can specify a condition in a `Put` operation as follows:
+
+```java
+// Build a condition.
+MutationCondition condition =
+ ConditionBuilder.putIf(ConditionBuilder.column("c4").isEqualToFloat(0.0F))
+ .and(ConditionBuilder.column("c5").isEqualToDouble(0.0))
+ .build();
+
+Put put =
+ Put.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .condition(condition) // condition
+ .build();
+
+// Execute the `Put` operation.
+transaction.put(put);
+```
+
+In addition to using the `putIf` condition, you can specify the `putIfExists` and `putIfNotExists` conditions as follows:
+
+```java
+// Build a `putIfExists` condition.
+MutationCondition putIfExistsCondition = ConditionBuilder.putIfExists();
+
+// Build a `putIfNotExists` condition.
+MutationCondition putIfNotExistsCondition = ConditionBuilder.putIfNotExists();
+```
+
+##### Conditions for `Delete`
+
+You can specify a condition in a `Delete` operation as follows:
+
+```java
+// Build a condition.
+MutationCondition condition =
+ ConditionBuilder.deleteIf(ConditionBuilder.column("c4").isEqualToFloat(0.0F))
+ .and(ConditionBuilder.column("c5").isEqualToDouble(0.0))
+ .build();
+
+Delete delete =
+ Delete.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKey)
+ .condition(condition) // condition
+ .build();
+
+// Execute the `Delete` operation.
+transaction.delete(delete);
+```
+
+In addition to using the `deleteIf` condition, you can specify the `deleteIfExists` condition as follows:
+
+```java
+// Build a `deleteIfExists` condition.
+MutationCondition deleteIfExistsCondition = ConditionBuilder.deleteIfExists();
+```
+
+#### Mutate operation
+
+Mutate is an operation to execute multiple mutations (`Put` and `Delete` operations).
+
+You need to create mutation objects first, and then you can execute the objects by using the `transaction.mutate()` method as follows:
+
+```java
+// Create `Put` and `Delete` operations.
+Key partitionKey = Key.ofInt("c1", 10);
+
+Key clusteringKeyForPut = Key.of("c2", "aaa", "c3", 100L);
+
+Put put =
+ Put.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKeyForPut)
+ .floatValue("c4", 1.23F)
+ .doubleValue("c5", 4.56)
+ .build();
+
+Key clusteringKeyForDelete = Key.of("c2", "bbb", "c3", 200L);
+
+Delete delete =
+ Delete.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .partitionKey(partitionKey)
+ .clusteringKey(clusteringKeyForDelete)
+ .build();
+
+// Execute the operations.
+transaction.mutate(Arrays.asList(put, delete));
+```
+
+#### Default namespace for CRUD operations
+
+A default namespace for all CRUD operations can be set by using a property in the ScalarDB configuration.
+
+```properties
+scalar.db.default_namespace_name=<NAMESPACE_NAME>
+```
+
+Any operation that does not specify a namespace will use the default namespace set in the configuration.
+
+```java
+// This operation will target the default namespace.
+Scan scanUsingDefaultNamespace =
+ Scan.newBuilder()
+ .table("tbl")
+ .all()
+ .build();
+// This operation will target the "ns" namespace.
+Scan scanUsingSpecifiedNamespace =
+ Scan.newBuilder()
+ .namespace("ns")
+ .table("tbl")
+ .all()
+ .build();
+```
+
+### Commit a transaction
+
+After executing CRUD operations, you need to commit a transaction to finish it.
+
+You can commit a transaction as follows:
+
+```java
+// Commit a transaction.
+transaction.commit();
+```
+
+### Roll back or abort a transaction
+
+If an error occurs when executing a transaction, you can roll back or abort the transaction.
+
+You can roll back a transaction as follows:
+
+```java
+// Roll back a transaction.
+transaction.rollback();
+```
+
+Or, you can abort a transaction as follows:
+
+```java
+// Abort a transaction.
+transaction.abort();
+```
+
+For details about how to handle exceptions in ScalarDB, see [How to handle exceptions](#how-to-handle-exceptions).
+
+## How to handle exceptions
+
+When executing a transaction, you will also need to handle exceptions properly.
+
+{% capture notice--warning %}
+**Attention**
+
+If you don't handle exceptions properly, you may face anomalies or data inconsistency.
+{% endcapture %}
+
+
+<div class="notice--warning">{{ notice--warning | markdownify }}</div>
+
+The following sample code shows how to handle exceptions:
+
+```java
+public class Sample {
+ public static void main(String[] args) throws Exception {
+ TransactionFactory factory = TransactionFactory.create("<CONFIGURATION_FILE_PATH>");
+ DistributedTransactionManager transactionManager = factory.getTransactionManager();
+
+ int retryCount = 0;
+ TransactionException lastException = null;
+
+ while (true) {
+ if (retryCount++ > 0) {
+ // Retry the transaction three times maximum.
+ if (retryCount >= 3) {
+ // Throw the last exception if the number of retries exceeds the maximum.
+ throw lastException;
+ }
+
+ // Sleep 100 milliseconds before retrying the transaction.
+ TimeUnit.MILLISECONDS.sleep(100);
+ }
+
+ DistributedTransaction transaction = null;
+ try {
+ // Begin a transaction.
+ transaction = transactionManager.begin();
+
+ // Execute CRUD operations in the transaction.
+ Optional<Result> result = transaction.get(...);
+ List<Result> results = transaction.scan(...);
+ transaction.put(...);
+ transaction.delete(...);
+
+ // Commit the transaction.
+ transaction.commit();
+ } catch (UnsatisfiedConditionException e) {
+ // You need to handle `UnsatisfiedConditionException` only if a mutation operation specifies a condition.
+ // This exception indicates the condition for the mutation operation is not met.
+
+ try {
+ transaction.rollback();
+ } catch (RollbackException ex) {
+ // Rolling back the transaction failed. Since the transaction should eventually recover,
+ // you don't need to do anything further. You can simply log the occurrence here.
+ }
+
+ // You can handle the exception here, according to your application requirements.
+
+ return;
+ } catch (UnknownTransactionStatusException e) {
+ // If you catch `UnknownTransactionStatusException` when committing the transaction,
+ // it indicates that the status of the transaction, whether it was successful or not, is unknown.
+ // In such a case, you need to check if the transaction is committed successfully or not and
+ // retry the transaction if it failed. How to identify a transaction status is delegated to users.
+ return;
+ } catch (TransactionException e) {
+ // For other exceptions, you can try retrying the transaction.
+
+ // For `CrudConflictException`, `CommitConflictException`, and `TransactionNotFoundException`,
+ // you can basically retry the transaction. However, for the other exceptions, the transaction
+ // will still fail if the cause of the exception is non-transient. In such a case, you will
+ // exhaust the number of retries and throw the last exception.
+
+ if (transaction != null) {
+ try {
+ transaction.rollback();
+ } catch (RollbackException ex) {
+ // Rolling back the transaction failed. The transaction should eventually recover,
+ // so you don't need to do anything further. You can simply log the occurrence here.
+ }
+ }
+
+ lastException = e;
+ }
+ }
+ }
+}
+```
+
+### `TransactionException` and `TransactionNotFoundException`
+
+The `begin()` API could throw `TransactionException` or `TransactionNotFoundException`:
+
+- If you catch `TransactionException`, this exception indicates that the transaction has failed to begin due to transient or non-transient faults. You can try retrying the transaction, but you may not be able to begin the transaction due to non-transient faults.
+- If you catch `TransactionNotFoundException`, this exception indicates that the transaction has failed to begin due to transient faults. In this case, you can retry the transaction.
+
+The `join()` API could also throw `TransactionNotFoundException`. You can handle this exception in the same way that you handle the exceptions for the `begin()` API.
+
+### `CrudException` and `CrudConflictException`
+
+The APIs for CRUD operations (`get()`, `scan()`, `put()`, `delete()`, and `mutate()`) could throw `CrudException` or `CrudConflictException`:
+
+- If you catch `CrudException`, this exception indicates that the transaction CRUD operation has failed due to transient or non-transient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is non-transient.
+- If you catch `CrudConflictException`, this exception indicates that the transaction CRUD operation has failed due to transient faults (for example, a conflict error). In this case, you can retry the transaction from the beginning.
+
+### `UnsatisfiedConditionException`
+
+The APIs for mutation operations (`put()`, `delete()`, and `mutate()`) could also throw `UnsatisfiedConditionException`.
+
+If you catch `UnsatisfiedConditionException`, this exception indicates that the condition for the mutation operation is not met. You can handle this exception according to your application requirements.
+
+### `CommitException`, `CommitConflictException`, and `UnknownTransactionStatusException`
+
+The `commit()` API could throw `CommitException`, `CommitConflictException`, or `UnknownTransactionStatusException`:
+
+- If you catch `CommitException`, this exception indicates that committing the transaction fails due to transient or non-transient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is non-transient.
+- If you catch `CommitConflictException`, this exception indicates that committing the transaction has failed due to transient faults (for example, a conflict error). In this case, you can retry the transaction from the beginning.
+- If you catch `UnknownTransactionStatusException`, this exception indicates that the status of the transaction, whether it was successful or not, is unknown. In this case, you need to check if the transaction is committed successfully and retry the transaction if it has failed.
+
+How to identify a transaction status is delegated to users. You may want to create a transaction status table and update it transactionally with other application data so that you can get the status of a transaction from the status table, as shown in the sketch below.
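+
+The following is a minimal sketch of this approach. The `app.tx_status` table and its columns are assumptions for illustration; because the status record is written in the same transaction as the application data, it becomes visible if and only if the transaction commits:
+
+```java
+// Record the transaction ID in a status table within the same transaction.
+transaction.put(
+    Put.newBuilder()
+        .namespace("app")
+        .table("tx_status")
+        .partitionKey(Key.ofText("tx_id", transaction.getId()))
+        .textValue("status", "committed")
+        .build());
+transaction.commit();
+
+// After catching `UnknownTransactionStatusException`, look up the record by
+// the transaction ID from another transaction: if the record exists, the
+// commit succeeded, so the transaction should not be retried.
+```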
+
+### Notes about some exceptions
+
+Although not illustrated in the sample code, the `resume()` API could also throw `TransactionNotFoundException`. This exception indicates that the transaction associated with the specified ID was not found and/or the transaction might have expired. In either case, you can retry the transaction from the beginning since the cause of this exception is basically transient.
+
+In the sample code, for `UnknownTransactionStatusException`, the transaction is not retried because the application must check whether the transaction was successful to avoid potential duplicate operations. For the other exceptions, the transaction is retried because the cause of the exception may be transient. If the cause is indeed transient, the transaction may succeed on a retry. However, if the cause is non-transient, the transaction will still fail even if you retry it, and you will eventually exhaust the number of retries.
+
+{% capture notice--info %}
+**Note**
+
+In the sample code, the transaction is retried a maximum of three times, with a 100-millisecond sleep before each retry. You can instead choose a retry policy, such as exponential backoff, according to your application requirements; a sketch follows this note.
+{% endcapture %}
+
+
+{{ notice--info | markdownify }}
+
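+The following is a minimal sketch of an exponential-backoff retry policy as an alternative to the fixed 100-millisecond sleep in the sample code. The class name, the maximum number of retries, and the initial delay are illustrative assumptions:
+
+```java
+import java.util.concurrent.TimeUnit;
+
+public class BackoffRetryExample {
+  private static final int MAX_RETRIES = 3;
+
+  public static void runWithBackoff(Runnable transactionalWork) throws InterruptedException {
+    long sleepMillis = 100; // Initial delay before the first retry.
+    for (int attempt = 0; ; attempt++) {
+      try {
+        transactionalWork.run(); // Begin, execute CRUD operations, and commit here.
+        return;
+      } catch (RuntimeException e) {
+        if (attempt >= MAX_RETRIES - 1) {
+          throw e; // The retries are exhausted, so surface the last exception.
+        }
+        TimeUnit.MILLISECONDS.sleep(sleepMillis);
+        sleepMillis *= 2; // Double the delay before the next attempt.
+      }
+    }
+  }
+}
+```
+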
+## Investigating Consensus Commit transaction manager errors
+
+To investigate errors when using the Consensus Commit transaction manager, you can enable a configuration that returns table metadata augmented with transaction metadata columns, which can be helpful when investigating transaction-related issues. With this configuration, which is intended only for troubleshooting the Consensus Commit transaction manager, you can see the transaction metadata column details for a given table by using the `DistributedTransactionAdmin.getTableMetadata()` method.
+
+By adding the following configuration, the results of `Get` and `Scan` operations will contain [transaction metadata](schema-loader.md#internal-metadata-for-consensus-commit):
+
+```properties
+# By default, this configuration is set to `false`.
+scalar.db.consensus_commit.include_metadata.enabled=true
+```
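+
+The following is a minimal sketch of inspecting the augmented table metadata. The `scalardb.properties` file path and the `emoney.account` table are assumptions for illustration:
+
+```java
+import com.scalar.db.api.DistributedTransactionAdmin;
+import com.scalar.db.api.TableMetadata;
+import com.scalar.db.service.TransactionFactory;
+
+public class MetadataExample {
+  public static void main(String[] args) throws Exception {
+    TransactionFactory factory = TransactionFactory.create("scalardb.properties");
+    DistributedTransactionAdmin admin = factory.getTransactionAdmin();
+
+    // With `include_metadata.enabled=true`, the returned metadata also
+    // contains the transaction metadata columns.
+    TableMetadata metadata = admin.getTableMetadata("emoney", "account");
+    metadata.getColumnNames().forEach(System.out::println);
+
+    admin.close();
+  }
+}
+```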
diff --git a/docs/3.12/backup-restore.md b/docs/3.12/backup-restore.md
new file mode 100644
index 00000000..e194bcb4
--- /dev/null
+++ b/docs/3.12/backup-restore.md
@@ -0,0 +1,208 @@
+# How to Back Up and Restore Databases Used Through ScalarDB
+
+Since ScalarDB provides transaction capabilities on top of non-transactional or transactional databases non-invasively, you need to take special care to back up and restore the databases in a transactionally consistent way.
+
+This guide describes how to back up and restore the databases that ScalarDB supports.
+
+## Create a backup
+
+How you create a backup depends on which database you're using and whether or not you're using multiple databases. The following decision tree shows which approach you should take.
+
+```mermaid
+flowchart TD
+ A[Are you using a single database with ScalarDB?]
+ A -->|Yes| B[Does the database have transaction support?]
+    B -->|Yes| C[Perform backup without explicit pausing]
+    B ---->|No| D[Perform backup with explicit pausing]
+ A ---->|No| D
+```
+
+### Back up without explicit pausing
+
+If you're using ScalarDB with a single database with support for transactions, you can create a backup of the database even while ScalarDB continues to accept transactions.
+
+{% capture notice--warning %}
+**Attention**
+
+Before creating a backup, you should consider the safest way to create a transactionally consistent backup of your databases and understand any risks that are associated with the backup process.
+{% endcapture %}
+
+
+{{ notice--warning | markdownify }}
+
+One requirement for creating a backup in ScalarDB is that backups for all the ScalarDB-managed tables (including the Coordinator table) need to be transactionally consistent or automatically recoverable to a transactionally consistent state. That means that you need to create a consistent backup by dumping all tables in a single transaction.
+
+How you create a transactionally consistent backup depends on the type of database that you're using. Select a database to see how to create a transactionally consistent backup for ScalarDB.
+
+{% capture notice--info %}
+**Note**
+
+The backup methods by database listed below are just examples of some of the databases that ScalarDB supports.
+{% endcapture %}
+
+
+{{ notice--info | markdownify }}
+
+**Amazon RDS or Azure Database for MySQL/PostgreSQL**
+
+You can restore to any point within the backup retention period by using the automated backup feature.
+
+**MySQL**
+
+Use the `mysqldump` command with the `--single-transaction` option.
+
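+For example, the following is a sketch of such a dump, where `<USERNAME>` and `<DATABASE>` are placeholders. Be sure to include all the ScalarDB-managed tables, including the Coordinator table:
+
+```shell
+$ mysqldump --single-transaction -u <USERNAME> -p <DATABASE> > backup.sql
+```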
+
+**PostgreSQL**
+
+Use the `pg_dump` command.
+
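+For example, the following is a sketch of such a dump, where `<USERNAME>` and `<DATABASE>` are placeholders. `pg_dump` exports the database from a single consistent snapshot:
+
+```shell
+$ pg_dump -U <USERNAME> <DATABASE> > backup.sql
+```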
+
+**SQLite**
+
+Use the `.backup` command with the `.timeout` command as specified in [Special commands to sqlite3 (dot-commands)](https://www.sqlite.org/cli.html#special_commands_to_sqlite3_dot_commands_).
+
+For an example, see [BASH: SQLite3 .backup command](https://stackoverflow.com/questions/23164445/bash-sqlite3-backup-command).
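+
+The following is a sketch of such a backup, where the file names are placeholders. The `.timeout` command makes the backup wait for ongoing write transactions instead of failing immediately:
+
+```shell
+$ sqlite3 <YOUR_DB>.sqlite3 ".timeout 10000" ".backup 'backup.sqlite3'"
+```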
+
+### Back up with explicit pausing
+
+Another way to create a transactionally consistent backup is to create a backup while a cluster of ScalarDB instances does not have any outstanding transactions. Creating the backup depends on the following:
+
+- If the underlying database has a point-in-time snapshot or backup feature, you can create a backup during the period when no outstanding transactions exist.
+- If the underlying database has a point-in-time restore or recovery (PITR) feature, you can set a restore point to a time (preferably the mid-time) in the pause duration period when no outstanding transactions exist.
+
+{% capture notice--info %}
+**Note**
+
+When using a PITR feature, you should minimize the clock drifts between clients and servers by using clock synchronization, such as NTP. Otherwise, the time you get as the paused duration might be too different from the time in which the pause was actually conducted, which could restore the backup to a point where ongoing transactions exist.
+
+In addition, you should pause for a sufficient amount of time (for example, five seconds) and use the mid-time of the paused duration as a restore point since clock synchronization cannot perfectly synchronize clocks between nodes.
+{% endcapture %}
+
+
+{{ notice--info | markdownify }}
+
+To make ScalarDB drain outstanding requests and stop accepting new requests so that a pause duration can be created, you should implement the [Scalar Admin](https://github.com/scalar-labs/scalar-admin) interface properly in your application that uses ScalarDB or use [ScalarDB Cluster (redirects to the Enterprise docs site)](https://scalardb.scalar-labs.com/docs/latest/scalardb-cluster/), which implements the Scalar Admin interface.
+
+By using the [Scalar Admin client tool](https://github.com/scalar-labs/scalar-admin/tree/main/java#scalar-admin-client-tool), you can pause nodes, servers, or applications that implement the Scalar Admin interface without losing ongoing transactions.
+
+How you create a transactionally consistent backup depends on the type of database that you're using. Select a database to see how to create a transactionally consistent backup for ScalarDB.
+
+{% capture notice--info %}
+**Note**
+
+The backup methods by database listed below are just examples of some of the databases that ScalarDB supports.
+{% endcapture %}
+
+
+{{ notice--info | markdownify }}
+
+**Cassandra**
+
+Cassandra has a built-in replication feature, so you do not always have to create a transactionally consistent backup. For example, if the replication factor is set to `3` and only the data of one of the nodes in a Cassandra cluster is lost, you won't need a transactionally consistent backup (snapshot) because the node can be recovered by using a normal, transactionally inconsistent backup (snapshot) and the repair feature.
+
+However, if the quorum of cluster nodes loses their data, you will need a transactionally consistent backup (snapshot) to restore the cluster to a certain transactionally consistent point.
+
+To create a transactionally consistent cluster-wide backup (snapshot), pause the application that is using ScalarDB or [ScalarDB Cluster (redirects to the Enterprise docs site)](https://scalardb.scalar-labs.com/docs/latest/scalardb-cluster/) and create backups (snapshots) of the nodes as described in [Back up with explicit pausing](#back-up-with-explicit-pausing), or stop the Cassandra cluster, take copies of all the data in the nodes, and start the cluster.
+
+**Cosmos DB for NoSQL**
+
+You must create a Cosmos DB for NoSQL account with a continuous backup policy that has the PITR feature enabled. After enabling the feature, backups are created continuously.
+
+To specify a transactionally consistent restore point, pause your application that is using ScalarDB with Cosmos DB for NoSQL as described in [Back up with explicit pausing](#back-up-with-explicit-pausing).
+
+**Amazon DynamoDB**
+
+You must enable the PITR feature for DynamoDB tables. If you're using [ScalarDB Schema Loader](schema-loader.md) to create schemas, the tool enables the PITR feature for tables by default.
+
+To specify a transactionally consistent restore point, pause your application that is using ScalarDB with DynamoDB as described in [Back up with explicit pausing](#back-up-with-explicit-pausing).
+
+## Restore a backup
+
+How you restore a transactionally consistent backup depends on the type of database that you're using. Select a database to see how to restore a transactionally consistent backup for ScalarDB.
+
+{% capture notice--info %}
+**Note**
+
+The restore methods by database listed below are just examples of some of the databases that ScalarDB supports.
+{% endcapture %}
+
+
+{{ notice--info | markdownify }}
+
+**Amazon RDS or Azure Database for MySQL/PostgreSQL**
+
+You can restore to any point within the backup retention period by using the automated backup feature.
+
+**Cassandra**
+
+First, stop all the nodes of the Cassandra cluster. Then, clean the `data`, `commitlog`, and `hints` directories, and place the backups (snapshots) in each node.
+
+After placing the backups (snapshots) in each node, start all the nodes of the Cassandra cluster.
+
+**Cosmos DB for NoSQL**
+
+Follow the official Azure documentation for [restoring an account by using the Azure portal](https://docs.microsoft.com/en-us/azure/cosmos-db/restore-account-continuous-backup#restore-account-portal). After restoring a backup, [configure the default consistency level](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/how-to-manage-consistency#configure-the-default-consistency-level) of the restored databases to `STRONG`. In addition, you should use the mid-time of the paused duration as the restore point, as previously explained.
+
+ScalarDB implements the Cosmos DB adapter by using its stored procedures, which are installed when creating schemas by using ScalarDB Schema Loader. However, the PITR feature of Cosmos DB doesn't restore stored procedures. Because of this, you need to re-install the required stored procedures for all tables after restoration. You can do this by using ScalarDB Schema Loader with the `--repair-all` option. For details, see [Repair tables](schema-loader.md#repair-tables).
+
+**Amazon DynamoDB**
+
+Follow the official AWS documentation for [restoring a DynamoDB table to a point in time](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/PointInTimeRecovery.Tutorial.html), but keep in mind that a table can only be restored with an alias. Because of this, you will need to restore the table with an alias, delete the original table, and rename the alias to the original name to restore the tables with the same name.
+
+To perform this procedure:
+
+1. Create a backup.
+ 1. Select the mid-time of the paused duration as the restore point.
+ 2. Restore by using the PITR of table A to table B.
+ 3. Create a backup of the restored table B (assuming that the backup is named backup B).
+ 4. Remove table B.
+2. Restore the backup.
+ 1. Remove table A.
+ 2. Create a table named A by using backup B.
+
+{% capture notice--info %}
+**Note**
+
+* You must do the steps mentioned above for each table because tables can only be restored one at a time.
+* Configurations such as PITR and auto-scaling policies are reset to the default values for restored tables, so you must manually configure the required settings. For details, see the official AWS documentation for [How to restore DynamoDB tables with DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/CreateBackup.html#CreateBackup_HowItWorks-restore).
+{% endcapture %}
+
+
+{{ notice--info | markdownify }}
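+
+The following is a sketch of step 1.2 by using the AWS CLI, where the table names and the timestamp are placeholders. As previously explained, use the mid-time of the paused duration as the restore point:
+
+```shell
+$ aws dynamodb restore-table-to-point-in-time \
+    --source-table-name <TABLE_A> \
+    --target-table-name <TABLE_B> \
+    --restore-date-time <RESTORE_POINT_EPOCH_SECONDS>
+```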
+
+**MySQL**
+
+If you used `mysqldump` to create the backup file, use the `mysql` command to restore the backup as specified in [Reloading SQL-Format Backups](https://dev.mysql.com/doc/mysql-backup-excerpt/8.0/en/reloading-sql-format-dumps.html).
+
+**PostgreSQL**
+
+If you used `pg_dump` to create the backup file, use the `psql` command to restore the backup as specified in [Restoring the Dump](https://www.postgresql.org/docs/current/backup-dump.html#BACKUP-DUMP-RESTORE).
+
+**SQLite**
+
+Use the `.restore` command as specified in [Special commands to sqlite3 (dot-commands)](https://www.sqlite.org/cli.html#special_commands_to_sqlite3_dot_commands_).
+
+
diff --git a/docs/3.12/configurations.md b/docs/3.12/configurations.md
new file mode 100644
index 00000000..696b0c79
--- /dev/null
+++ b/docs/3.12/configurations.md
@@ -0,0 +1,276 @@
+# ScalarDB Configurations
+
+This page describes the available configurations for ScalarDB.
+
+## ScalarDB client configurations
+
+ScalarDB provides its own transaction protocol called Consensus Commit. You can use the Consensus Commit protocol directly through the ScalarDB client library or through [ScalarDB Cluster (redirects to the Enterprise docs site)](https://scalardb.scalar-labs.com/docs/3.12/scalardb-cluster/), which is a component that is available only in the ScalarDB Enterprise edition.
+
+### Use Consensus Commit directly
+
+Consensus Commit is the default transaction manager type in ScalarDB. To use the Consensus Commit transaction manager, add the following to the ScalarDB properties file:
+
+```properties
+scalar.db.transaction_manager=consensus-commit
+```
+
+{% capture notice--info %}
+**Note**
+
+If you don't specify the `scalar.db.transaction_manager` property, `consensus-commit` will be the default value.
+{% endcapture %}
+
+
+{{ notice--info | markdownify }}
+
+#### Basic configurations
+
+The following basic configurations are available for the Consensus Commit transaction manager:
+
+| Name | Description | Default |
+|-------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|
+| `scalar.db.transaction_manager` | `consensus-commit` should be specified. | - |
+| `scalar.db.consensus_commit.isolation_level` | Isolation level used for Consensus Commit. Either `SNAPSHOT` or `SERIALIZABLE` can be specified. | `SNAPSHOT` |
+| `scalar.db.consensus_commit.serializable_strategy` | Serializable strategy used for Consensus Commit. Either `EXTRA_READ` or `EXTRA_WRITE` can be specified. If `SNAPSHOT` is specified in the property `scalar.db.consensus_commit.isolation_level`, this configuration will be ignored. | `EXTRA_READ` |
+| `scalar.db.consensus_commit.coordinator.namespace` | Namespace name of Coordinator tables. | `coordinator` |
+| `scalar.db.consensus_commit.include_metadata.enabled` | If set to `true`, the results of `Get` and `Scan` operations will contain transaction metadata. To see the transaction metadata column details for a given table, you can use the `DistributedTransactionAdmin.getTableMetadata()` method, which will return the table metadata augmented with the transaction metadata columns. Using this configuration can be useful for investigating transaction-related issues. | `false` |
+
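+For example, the following is a sketch of these properties that uses the `SERIALIZABLE` isolation level with the default `EXTRA_READ` strategy:
+
+```properties
+scalar.db.transaction_manager=consensus-commit
+scalar.db.consensus_commit.isolation_level=SERIALIZABLE
+scalar.db.consensus_commit.serializable_strategy=EXTRA_READ
+```
+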
+#### Performance-related configurations
+
+The following performance-related configurations are available for the Consensus Commit transaction manager:
+
+| Name | Description | Default |
+|-----------------------------------------------------------------|--------------------------------------------------------------------------------|-------------------------------------------------------------------|
+| `scalar.db.consensus_commit.parallel_executor_count` | Number of executors (threads) for parallel execution. | `128` |
+| `scalar.db.consensus_commit.parallel_preparation.enabled` | Whether or not the preparation phase is executed in parallel. | `true` |
+| `scalar.db.consensus_commit.parallel_validation.enabled` | Whether or not the validation phase (in `EXTRA_READ`) is executed in parallel. | The value of `scalar.db.consensus_commit.parallel_commit.enabled` |
+| `scalar.db.consensus_commit.parallel_commit.enabled` | Whether or not the commit phase is executed in parallel. | `true` |
+| `scalar.db.consensus_commit.parallel_rollback.enabled` | Whether or not the rollback phase is executed in parallel. | The value of `scalar.db.consensus_commit.parallel_commit.enabled` |
+| `scalar.db.consensus_commit.async_commit.enabled` | Whether or not the commit phase is executed asynchronously. | `false` |
+| `scalar.db.consensus_commit.async_rollback.enabled` | Whether or not the rollback phase is executed asynchronously. | The value of `scalar.db.consensus_commit.async_commit.enabled` |
+| `scalar.db.consensus_commit.parallel_implicit_pre_read.enabled` | Whether or not implicit pre-read is executed in parallel. | `true` |
+
+#### Underlying storage or database configurations
+
+Consensus Commit has a storage abstraction layer and supports multiple underlying storages. You can specify the storage implementation by using the `scalar.db.storage` property.
+
+Select a database to see the configurations available for each storage.
+
+The following configurations are available for Cassandra:
+
+| Name | Description | Default |
+|-----------------------------------------|-----------------------------------------------------------------------|------------|
+| `scalar.db.storage` | `cassandra` must be specified. | - |
+| `scalar.db.contact_points` | Comma-separated contact points. | |
+| `scalar.db.contact_port` | Port number for all the contact points. | |
+| `scalar.db.username` | Username to access the database. | |
+| `scalar.db.password` | Password to access the database. | |
+| `scalar.db.cassandra.metadata.keyspace` | Keyspace name for the namespace and table metadata used for ScalarDB. | `scalardb` |
+
+The following configurations are available for Cosmos DB for NoSQL:
+
+| Name | Description | Default |
+|--------------------------------------------|----------------------------------------------------------------------------------------------------------|------------|
+| `scalar.db.storage` | `cosmos` must be specified. | - |
+| `scalar.db.contact_points` | Azure Cosmos DB for NoSQL endpoint with which ScalarDB should communicate. | |
+| `scalar.db.password` | Either a master or read-only key used to perform authentication for accessing Azure Cosmos DB for NoSQL. | |
+| `scalar.db.cosmos.table_metadata.database` | Database name for the table metadata used for ScalarDB. | `scalardb` |
+| `scalar.db.cosmos.consistency_level` | Consistency level used for Cosmos DB operations. `STRONG` or `BOUNDED_STALENESS` can be specified. | `STRONG` |
+
+The following configurations are available for DynamoDB:
+
+| Name | Description | Default |
+|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|
+| `scalar.db.storage` | `dynamo` must be specified. | - |
+| `scalar.db.contact_points` | AWS region with which ScalarDB should communicate (e.g., `us-east-1`). | |
+| `scalar.db.username` | AWS access key used to identify the user interacting with AWS. | |
+| `scalar.db.password` | AWS secret access key used to authenticate the user interacting with AWS. | |
+| `scalar.db.dynamo.endpoint_override` | Amazon DynamoDB endpoint with which ScalarDB should communicate. This is primarily used for testing with a local instance instead of an AWS service. | |
+| `scalar.db.dynamo.table_metadata.namespace` | Namespace name for the table metadata used for ScalarDB. | `scalardb` |
+| `scalar.db.dynamo.namespace.prefix` | Prefix for the user namespaces and metadata namespace names. Since AWS requires having unique table names in a single AWS region, this is useful if you want to use multiple ScalarDB environments (development, production, etc.) in a single AWS region. | |
+
+The following configurations are available for JDBC databases:
+
+| Name | Description | Default |
+|-----------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------|
+| `scalar.db.storage` | `jdbc` must be specified. | - |
+| `scalar.db.contact_points` | JDBC connection URL. | |
+| `scalar.db.username` | Username to access the database. | |
+| `scalar.db.password` | Password to access the database. | |
+| `scalar.db.jdbc.connection_pool.min_idle` | Minimum number of idle connections in the connection pool. | `20` |
+| `scalar.db.jdbc.connection_pool.max_idle` | Maximum number of connections that can remain idle in the connection pool. | `50` |
+| `scalar.db.jdbc.connection_pool.max_total` | Maximum total number of idle and borrowed connections that can be active at the same time for the connection pool. Use a negative value for no limit. | `100` |
+| `scalar.db.jdbc.prepared_statements_pool.enabled` | Setting this property to `true` enables prepared-statement pooling. | `false` |
+| `scalar.db.jdbc.prepared_statements_pool.max_open` | Maximum number of open statements that can be allocated from the statement pool at the same time. Use a negative value for no limit. | `-1` |
+| `scalar.db.jdbc.isolation_level` | Isolation level for JDBC. `READ_UNCOMMITTED`, `READ_COMMITTED`, `REPEATABLE_READ`, or `SERIALIZABLE` can be specified. | Underlying-database specific |
+| `scalar.db.jdbc.table_metadata.schema` | Schema name for the table metadata used for ScalarDB. | `scalardb` |
+| `scalar.db.jdbc.table_metadata.connection_pool.min_idle` | Minimum number of idle connections in the connection pool for the table metadata. | `5` |
+| `scalar.db.jdbc.table_metadata.connection_pool.max_idle` | Maximum number of connections that can remain idle in the connection pool for the table metadata. | `10` |
+| `scalar.db.jdbc.table_metadata.connection_pool.max_total` | Maximum total number of idle and borrowed connections that can be active at the same time for the connection pool for the table metadata. Use a negative value for no limit. | `25` |
+| `scalar.db.jdbc.admin.connection_pool.min_idle` | Minimum number of idle connections in the connection pool for admin. | `5` |
+| `scalar.db.jdbc.admin.connection_pool.max_idle` | Maximum number of connections that can remain idle in the connection pool for admin. | `10` |
+| `scalar.db.jdbc.admin.connection_pool.max_total` | Maximum total number of idle and borrowed connections that can be active at the same time for the connection pool for admin. Use a negative value for no limit. | `25` |
+
+{% capture notice--info %}
+**Note**
+
+If you use SQLite3 as a JDBC database, you must set `scalar.db.contact_points` as follows.
+
+```properties
+scalar.db.contact_points=jdbc:sqlite:<SQLITE_DB_FILE_PATH>.sqlite3?busy_timeout=10000
+```
+
+Unlike other JDBC databases, [SQLite3 does not fully support concurrent access](https://www.sqlite.org/lang_transaction.html).
+To avoid frequent errors caused internally by [`SQLITE_BUSY`](https://www.sqlite.org/rescode.html#busy), we recommend setting a [`busy_timeout`](https://www.sqlite.org/c3ref/busy_timeout.html) parameter.
+{% endcapture %}
+
+
+{{ notice--info | markdownify }}
+
+##### Multi-storage support
+
+ScalarDB supports using multiple storage implementations simultaneously. You can use multiple storages by specifying `multi-storage` as the value for the `scalar.db.storage` property.
+
+For details about using multiple storages, see [Multi-Storage Transactions](multi-storage-transactions.md).
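+
+The following is a minimal sketch of such a configuration, assuming a Cassandra storage and a MySQL (JDBC) storage. The storage names, namespaces, and connection values are illustrative; see the linked document for the full set of properties:
+
+```properties
+scalar.db.storage=multi-storage
+scalar.db.multi_storage.storages=cassandra,mysql
+scalar.db.multi_storage.storages.cassandra.storage=cassandra
+scalar.db.multi_storage.storages.cassandra.contact_points=localhost
+scalar.db.multi_storage.storages.mysql.storage=jdbc
+scalar.db.multi_storage.storages.mysql.contact_points=jdbc:mysql://localhost:3306/
+scalar.db.multi_storage.namespace_mapping=user:cassandra,coordinator:mysql
+scalar.db.multi_storage.default_storage=cassandra
+```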
+
+### Use Consensus Commit through ScalarDB Cluster
+
+[ScalarDB Cluster (redirects to the Enterprise docs site)](https://scalardb.scalar-labs.com/docs/3.12/scalardb-cluster/) is a component that provides a gRPC interface to ScalarDB.
+
+For details about client configurations, see the ScalarDB Cluster [client configurations (redirects to the Enterprise docs site)](https://scalardb.scalar-labs.com/docs/3.12/scalardb-cluster/developer-guide-for-scalardb-cluster-with-java-api/#client-configurations).
+
+## Cross-partition scan configurations
+
+By enabling the cross-partition scan option below, the `Scan` operation can retrieve all records across partitions. In addition, you can specify arbitrary conditions and orderings in the cross-partition `Scan` operation by enabling `cross_partition_scan.filtering` and `cross_partition_scan.ordering`, respectively. Currently, the cross-partition scan with filtering and ordering is available only for JDBC databases. To enable filtering and ordering, `scalar.db.cross_partition_scan.enabled` must be set to `true`.
+
+For details on how to use cross-partition scan, see [Scan operation](./api-guide.md#scan-operation).
+
+{% capture notice--warning %}
+**Attention**
+
+For non-JDBC databases, we do not recommend enabling cross-partition scan with the `SERIALIZABLE` isolation level because transactions could be executed at a lower isolation level (that is, `SNAPSHOT`). When using non-JDBC databases, use cross-partition scan at your own risk only if consistency does not matter for your transactions.
+{% endcapture %}
+
+
+{{ notice--warning | markdownify }}
+
+| Name | Description | Default |
+|----------------------------------------------------|-----------------------------------------------|---------|
+| `scalar.db.cross_partition_scan.enabled` | Enable cross-partition scan. | `false` |
+| `scalar.db.cross_partition_scan.filtering.enabled` | Enable filtering in cross-partition scan. | `false` |
+| `scalar.db.cross_partition_scan.ordering.enabled` | Enable ordering in cross-partition scan. | `false` |
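+
+For example, the following is a sketch of a properties file that enables cross-partition scan with filtering and ordering for a JDBC database:
+
+```properties
+scalar.db.cross_partition_scan.enabled=true
+scalar.db.cross_partition_scan.filtering.enabled=true
+scalar.db.cross_partition_scan.ordering.enabled=true
+```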
+
+## Other ScalarDB configurations
+
+The following are additional configurations available for ScalarDB:
+
+| Name | Description | Default |
+|------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------|
+| `scalar.db.metadata.cache_expiration_time_secs` | ScalarDB has a metadata cache to reduce the number of requests to the database. This setting specifies the expiration time of the cache in seconds. | `-1` (no expiration) |
+| `scalar.db.active_transaction_management.expiration_time_millis` | ScalarDB maintains ongoing transactions, which can be resumed by using a transaction ID. This setting specifies the expiration time of this transaction management feature in milliseconds. | `-1` (no expiration) |
+| `scalar.db.default_namespace_name` | The given namespace name will be used by operations that do not already specify a namespace. | |
+
+## Placeholder usage
+
+You can use placeholders in the values, and they are replaced with environment variables (`${env:<ENVIRONMENT_VARIABLE_NAME>}`) or system properties (`${sys:<SYSTEM_PROPERTY_NAME>}`). You can also specify default values in placeholders like `${sys:<SYSTEM_PROPERTY_NAME>:-<DEFAULT_VALUE>}`.
+
+The following is an example of a configuration that uses placeholders:
+
+```properties
+scalar.db.username=${env:SCALAR_DB_USERNAME:-admin}
+scalar.db.password=${env:SCALAR_DB_PASSWORD}
+```
+
+In this example configuration, ScalarDB reads the username and password from environment variables. If the environment variable `SCALAR_DB_USERNAME` does not exist, ScalarDB uses the default value `admin`.
+
+## Configuration examples
+
+This section provides some configuration examples.
+
+### Configuration example #1 - App and database
+
+```mermaid
+flowchart LR
+ app["App (ScalarDB library with Consensus Commit)"]
+ db[(Underlying storage or database)]
+ app --> db
+```
+
+In this example configuration, the app (ScalarDB library with Consensus Commit) connects to an underlying storage or database (in this case, Cassandra) directly.
+
+{% capture notice--warning %}
+**Attention**
+
+This configuration exists only for development purposes and isn’t suitable for a production environment. This is because the app needs to implement the [Scalar Admin](https://github.com/scalar-labs/scalar-admin) interface to take transactionally consistent backups for ScalarDB, which requires additional configurations.
+{% endcapture %}
+
+
+{{ notice--warning | markdownify }}
+
+The following is an example of the configuration for connecting the app to the underlying database through ScalarDB:
+
+```properties
+# Transaction manager implementation.
+scalar.db.transaction_manager=consensus-commit
+
+# Storage implementation.
+scalar.db.storage=cassandra
+
+# Comma-separated contact points.
+scalar.db.contact_points=
+
+# Credential information to access the database.
+scalar.db.username=
+scalar.db.password=
+```
+
+### Configuration example #2 - App, ScalarDB Cluster, and database
+
+```mermaid
+flowchart LR
+ app["App - ScalarDB library with gRPC"]
+ cluster["ScalarDB Cluster - (ScalarDB library with Consensus Commit)"]
+ db[(Underlying storage or database)]
+ app --> cluster --> db
+```
+
+In this example configuration, the app (ScalarDB library with gRPC) connects to an underlying storage or database (in this case, Cassandra) through ScalarDB Cluster, which is a component that is available only in the ScalarDB Enterprise edition.
+
+{% capture notice--info %}
+**Note**
+
+This configuration is acceptable for production use because ScalarDB Cluster implements the [Scalar Admin](https://github.com/scalar-labs/scalar-admin) interface, which enables you to take transactionally consistent backups for ScalarDB by pausing ScalarDB Cluster.
+
+{% endcapture %}
+
+{{ notice--info | markdownify }}
+
diff --git a/docs/3.12/getting-started-with-scalardb-by-using-kotlin.md b/docs/3.12/getting-started-with-scalardb-by-using-kotlin.md
new file mode 100644
--- /dev/null
+++ b/docs/3.12/getting-started-with-scalardb-by-using-kotlin.md
+# Getting Started with ScalarDB by Using Kotlin
+
+## Install a JDK
+
+Because ScalarDB is written in Java, you must have one of the following Java Development Kits (JDKs) installed in your environment:
+
+- [Oracle JDK](https://www.oracle.com/java/technologies/downloads/) LTS version (8, 11, or 17)
+- [OpenJDK](https://openjdk.org/install/) LTS version (8, 11, or 17)
+
+{% capture notice--info %}
+**Note**
+
+We recommend using the LTS versions mentioned above, but other non-LTS versions may work.
+
+In addition, other JDKs should work with ScalarDB, but we haven't tested them.
+{% endcapture %}
+
+
+{{ notice--info | markdownify }}
+
+## Clone the `scalardb` repository
+
+Open a terminal window, and go to your working directory. Then, clone the [scalardb](https://github.com/scalar-labs/scalardb) repository by running the following command:
+
+```shell
+$ git clone https://github.com/scalar-labs/scalardb
+```
+
+Then, go to the `scalardb/docs/getting-started-kotlin` directory in the cloned repository by running the following command:
+
+```shell
+$ cd scalardb/docs/getting-started-kotlin
+```
+
+## Set up your database for ScalarDB
+
+Select your database, and follow the instructions to configure it for ScalarDB.
+
+For a list of databases that ScalarDB supports, see [Supported Databases](scalardb-supported-databases.md).
+
+Confirm that you have Cassandra installed. If Cassandra isn't installed, visit [Downloading Cassandra](https://cassandra.apache.org/_/download.html).
+
+### Configure Cassandra
+{:.no_toc}
+
+Open **cassandra.yaml** in your preferred IDE. Then, change `commitlog_sync` from `periodic` to `batch` so that you don't lose data if a quorum of replica nodes goes down.
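+
+For example, the relevant part of **cassandra.yaml** should look like the following. The batch window value shown here is Cassandra's default and is included only for illustration:
+
+```yaml
+commitlog_sync: batch
+commitlog_sync_batch_window_in_ms: 2
+```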
+
+### Configure ScalarDB
+{:.no_toc}
+
+The following instructions assume that you have properly installed and configured the JDK and Cassandra in your local environment, and Cassandra is running on your localhost.
+
+The **scalardb.properties** file in the `docs/getting-started-kotlin` directory holds database configurations for ScalarDB. The following is a basic configuration for Cassandra. Be sure to change the values for `scalar.db.username` and `scalar.db.password` as described.
+
+```properties
+# The Cassandra storage implementation is used for Consensus Commit.
+scalar.db.storage=cassandra
+
+# Comma-separated contact points.
+scalar.db.contact_points=localhost
+
+# The port number for all the contact points.
+scalar.db.contact_port=9042
+
+# The username and password to access the database.
+scalar.db.username=
+scalar.db.password=
+```
+
+To use Azure Cosmos DB for NoSQL, you must have an Azure account. If you don't have an Azure account, visit [Create an Azure Cosmos DB account](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/quickstart-portal#create-account).
+
+### Configure Cosmos DB for NoSQL
+{:.no_toc}
+
+Set the **default consistency level** to **Strong** according to the official document at [Configure the default consistency level](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/how-to-manage-consistency#configure-the-default-consistency-level).
+
+### Configure ScalarDB
+{:.no_toc}
+
+The following instructions assume that you have properly installed and configured the JDK in your local environment and properly configured your Cosmos DB for NoSQL account in Azure.
+
+The **scalardb.properties** file in the `docs/getting-started-kotlin` directory holds database configurations for ScalarDB. Be sure to change the values for `scalar.db.contact_points` and `scalar.db.password` as described.
+
+```properties
+# The Cosmos DB for NoSQL storage implementation is used for Consensus Commit.
+scalar.db.storage=cosmos
+
+# The Cosmos DB for NoSQL URI.
+scalar.db.contact_points=
+
+# The Cosmos DB for NoSQL key to access the database.
+scalar.db.password=
+```
+
+{% capture notice--info %}
+**Note**
+
+You can use a primary key or a secondary key as the value for `scalar.db.password`.
+{% endcapture %}
+
+{{ notice--info | markdownify }}
+
+To use Amazon DynamoDB, you must have an AWS account. If you don't have an AWS account, visit [Getting started: Are you a first-time AWS user?](https://docs.aws.amazon.com/accounts/latest/reference/welcome-first-time-user.html).
+
+### Configure ScalarDB
+{:.no_toc}
+
+The following instructions assume that you have properly installed and configured the JDK in your local environment.
+
+The **scalardb.properties** file in the `docs/getting-started-kotlin` directory holds database configurations for ScalarDB. Be sure to change the values for `scalar.db.contact_points`, `scalar.db.username`, and `scalar.db.password` as described.
+
+```properties
+# The DynamoDB storage implementation is used for Consensus Commit.
+scalar.db.storage=dynamo
+
+# The AWS region.
+scalar.db.contact_points=
+
+# The AWS access key ID and secret access key to access the database.
+scalar.db.username=
+scalar.db.password=
+```
+
+Confirm that you have a JDBC database installed. For a list of supported JDBC databases, see [Supported Databases](scalardb-supported-databases.md).
+
+### Configure ScalarDB
+{:.no_toc}
+
+The following instructions assume that you have properly installed and configured the JDK and JDBC database in your local environment, and the JDBC database is running on your localhost.
+
+The **scalardb.properties** file in the `docs/getting-started-kotlin` directory holds database configurations for ScalarDB. The following is a basic configuration for JDBC databases.
+
+{% capture notice--info %}
+**Note**
+
+Be sure to uncomment the `scalar.db.contact_points` variable and change the value of the JDBC database you are using, and change the values for `scalar.db.username` and `scalar.db.password` as described.
+{% endcapture %}
+
+
+{{ notice--info | markdownify }}
+
+```properties
+# The JDBC database storage implementation is used for Consensus Commit.
+scalar.db.storage=jdbc
+
+# The JDBC database URL for the type of database you are using.
+# scalar.db.contact_points=jdbc:mysql://localhost:3306/
+# scalar.db.contact_points=jdbc:oracle:thin:@//localhost:1521/
+# scalar.db.contact_points=jdbc:postgresql://localhost:5432/
+# scalar.db.contact_points=jdbc:sqlserver://localhost:1433;
+# scalar.db.contact_points=jdbc:sqlite:<SQLITE_DB_FILE_PATH>.sqlite3?busy_timeout=10000
+
+# The username and password for connecting to the database.
+scalar.db.username=
+scalar.db.password=
+```
+
+## Create and load the database schema
+
+You need to define the database schema (the method in which the data will be organized) in the application. For details about the supported data types, see [Data type mapping between ScalarDB and other databases](schema-loader.md#data-type-mapping-between-scalardb-and-the-other-databases).
+
+For this tutorial, create a file named **emoney.json** in the `scalardb/docs/getting-started-kotlin` directory. Then, add the following JSON code to define the schema.
+
+```json
+{
+ "emoney.account": {
+ "transaction": true,
+ "partition-key": [
+ "id"
+ ],
+ "clustering-key": [],
+ "columns": {
+ "id": "TEXT",
+ "balance": "INT"
+ }
+ }
+}
+```
+
+To apply the schema, go to the [`scalardb` Releases](https://github.com/scalar-labs/scalardb/releases) page and download the ScalarDB Schema Loader that matches the version of ScalarDB that you are using to the `getting-started-kotlin` folder.
+
+Then, run the following command, replacing `<VERSION>` with the version of the ScalarDB Schema Loader that you downloaded:
+
+```shell
+$ java -jar scalardb-schema-loader-<VERSION>.jar --config scalardb.properties --schema-file emoney.json --coordinator
+```
+
+{% capture notice--info %}
+**Note**
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.md).
+{% endcapture %}
+
+
+{{ notice--info | markdownify }}
+
+## Execute transactions and retrieve data in the basic electronic money application
+
+After loading the schema, you can execute transactions and retrieve data in the basic electronic money application that is included in the repository that you cloned.
+
+The application supports the following types of transactions:
+
+- Create an account.
+- Add funds to an account.
+- Send funds between two accounts.
+- Get an account balance.
+
+{% capture notice--info %}
+**Note**
+
+When you first execute a Gradle command, Gradle will automatically install the necessary libraries.
+{% endcapture %}
+
+
+{{ notice--info | markdownify }}
+
+### Create an account with a balance
+
+You need an account with a balance so that you can send funds between accounts.
+
+To create an account for **customer1** that has a balance of **500**, run the following command:
+
+```shell
+$ ./gradlew run --args="-action charge -amount 500 -to customer1"
+```
+
+### Create an account without a balance
+
+After setting up an account that has a balance, you need another account for sending funds to.
+
+To create an account for **merchant1** that has a balance of **0**, run the following command:
+
+```shell
+$ ./gradlew run --args="-action charge -amount 0 -to merchant1"
+```
+
+### Add funds to an account
+
+You can add funds to an account in the same way that you created and added funds to an account in [Create an account with a balance](#create-an-account-with-a-balance).
+
+To add **500** to the account for **customer1**, run the following command:
+
+```shell
+$ ./gradlew run --args="-action charge -amount 500 -to customer1"
+```
+
+The account for **customer1** will now have a balance of **1000**.
+
+### Send electronic money between two accounts
+
+Now that you have created two accounts, with at least one of those accounts having a balance, you can send funds from one account to the other account.
+
+To have **customer1** pay **100** to **merchant1**, run the following command:
+
+```shell
+$ ./gradlew run --args="-action pay -amount 100 -from customer1 -to merchant1"
+```
+
+### Get an account balance
+
+After sending funds from one account to the other, you can check the balance of each account.
+
+To get the balance of **customer1**, run the following command:
+
+```shell
+$ ./gradlew run --args="-action getBalance -id customer1"
+```
+
+You should see the following output:
+
+```shell
+...
+The balance for customer1 is 900
+...
+```
+
+To get the balance of **merchant1**, run the following command:
+
+```shell
+$ ./gradlew run --args="-action getBalance -id merchant1"
+```
+
+You should see the following output:
+
+```shell
+...
+The balance for merchant1 is 100
+...
+```
+
+## Reference
+
+To see the source code for the electronic money application used in this tutorial, see [`ElectronicMoney.kt`](./getting-started-kotlin/src/main/kotlin/sample/ElectronicMoney.kt).
diff --git a/docs/3.12/getting-started-with-scalardb.md b/docs/3.12/getting-started-with-scalardb.md
new file mode 100644
index 00000000..bd6f7b3d
--- /dev/null
+++ b/docs/3.12/getting-started-with-scalardb.md
@@ -0,0 +1,316 @@
+# Getting Started with ScalarDB
+
+This getting started tutorial explains how to configure your preferred database in ScalarDB and set up a basic electronic money application.
+
+{% capture notice--warning %}
+**Warning**
+
+The electronic money application is simplified for this tutorial and isn't suitable for a production environment.
+{% endcapture %}
+
+
+{{ notice--warning | markdownify }}
+
+## Install a JDK
+
+Because ScalarDB is written in Java, you must have one of the following Java Development Kits (JDKs) installed in your environment:
+
+- [Oracle JDK](https://www.oracle.com/java/technologies/downloads/) LTS version (8, 11, or 17)
+- [OpenJDK](https://openjdk.org/install/) LTS version (8, 11, or 17)
+
+{% capture notice--info %}
+**Note**
+
+We recommend using the LTS versions mentioned above, but other non-LTS versions may work.
+
+In addition, other JDKs should work with ScalarDB, but we haven't tested them.
+{% endcapture %}
+
+
+{{ notice--info | markdownify }}
+
+## Clone the `scalardb` repository
+
+Open a terminal window, and go to your working directory. Then, clone the [scalardb](https://github.com/scalar-labs/scalardb) repository by running the following command:
+
+```shell
+$ git clone https://github.com/scalar-labs/scalardb
+```
+
+Then, go to the `scalardb/docs/getting-started` directory in the cloned repository by running the following command:
+
+```shell
+$ cd scalardb/docs/getting-started
+```
+
+## Set up your database for ScalarDB
+
+Select your database, and follow the instructions to configure it for ScalarDB.
+
+For a list of databases that ScalarDB supports, see [Supported Databases](scalardb-supported-databases.md).
+
+Confirm that you have Cassandra installed. If Cassandra isn't installed, visit [Downloading Cassandra](https://cassandra.apache.org/_/download.html).
+
+### Configure Cassandra
+{:.no_toc}
+
+Open **cassandra.yaml** in your preferred IDE. Then, change `commitlog_sync` from `periodic` to `batch` so that you don't lose data if a quorum of replica nodes goes down.
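+
+For example, the relevant part of **cassandra.yaml** should look like the following. The batch window value shown here is Cassandra's default and is included only for illustration:
+
+```yaml
+commitlog_sync: batch
+commitlog_sync_batch_window_in_ms: 2
+```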
+
+### Configure ScalarDB
+{:.no_toc}
+
+The following instructions assume that you have properly installed and configured the JDK and Cassandra in your local environment, and Cassandra is running on your localhost.
+
+The **scalardb.properties** file in the `docs/getting-started` directory holds database configurations for ScalarDB. The following is a basic configuration for Cassandra. Be sure to change the values for `scalar.db.username` and `scalar.db.password` as described.
+
+```properties
+# The Cassandra storage implementation is used for Consensus Commit.
+scalar.db.storage=cassandra
+
+# Comma-separated contact points.
+scalar.db.contact_points=localhost
+
+# The port number for all the contact points.
+scalar.db.contact_port=9042
+
+# The username and password to access the database.
+scalar.db.username=
+scalar.db.password=
+```
+
+To use Azure Cosmos DB for NoSQL, you must have an Azure account. If you don't have an Azure account, visit [Create an Azure Cosmos DB account](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/quickstart-portal#create-account).
+
+### Configure Cosmos DB for NoSQL
+{:.no_toc}
+
+Set the **default consistency level** to **Strong** according to the official document at [Configure the default consistency level](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/how-to-manage-consistency#configure-the-default-consistency-level).
+
+### Configure ScalarDB
+{:.no_toc}
+
+The following instructions assume that you have properly installed and configured the JDK in your local environment and properly configured your Cosmos DB for NoSQL account in Azure.
+
+The **scalardb.properties** file in the `docs/getting-started` directory holds database configurations for ScalarDB. Be sure to change the values for `scalar.db.contact_points` and `scalar.db.password` as described.
+
+```properties
+# The Cosmos DB for NoSQL storage implementation is used for Consensus Commit.
+scalar.db.storage=cosmos
+
+# The Cosmos DB for NoSQL URI.
+scalar.db.contact_points=
+
+# The Cosmos DB for NoSQL key to access the database.
+scalar.db.password=
+```
+
+{% capture notice--info %}
+**Note**
+
+You can use a primary key or a secondary key as the value for `scalar.db.password`.
+{% endcapture %}
+
+{{ notice--info | markdownify }}
+
+To use Amazon DynamoDB, you must have an AWS account. If you don't have an AWS account, visit [Getting started: Are you a first-time AWS user?](https://docs.aws.amazon.com/accounts/latest/reference/welcome-first-time-user.html).
+
+### Configure ScalarDB
+{:.no_toc}
+
+The following instructions assume that you have properly installed and configured the JDK in your local environment.
+
+The **scalardb.properties** file in the `docs/getting-started` directory holds database configurations for ScalarDB. Be sure to change the values for `scalar.db.contact_points`, `scalar.db.username`, and `scalar.db.password` as described.
+
+```properties
+# The DynamoDB storage implementation is used for Consensus Commit.
+scalar.db.storage=dynamo
+
+# The AWS region.
+scalar.db.contact_points=
+
+# The AWS access key ID and secret access key to access the database.
+scalar.db.username=
+scalar.db.password=
+```
+
+Confirm that you have a JDBC database installed. For a list of supported JDBC databases, see [Supported Databases](scalardb-supported-databases.md).
+
+### Configure ScalarDB
+{:.no_toc}
+
+The following instructions assume that you have properly installed and configured the JDK and JDBC database in your local environment, and the JDBC database is running on your localhost.
+
+The **scalardb.properties** file in the `docs/getting-started` directory holds database configurations for ScalarDB. The following is a basic configuration for JDBC databases.
+
+{% capture notice--info %}
+**Note**
+
+Be sure to uncomment the `scalar.db.contact_points` variable and change the value of the JDBC database you are using, and change the values for `scalar.db.username` and `scalar.db.password` as described.
+{% endcapture %}
+
+
+{{ notice--info | markdownify }}
+
+```properties
+# The JDBC database storage implementation is used for Consensus Commit.
+scalar.db.storage=jdbc
+
+# The JDBC database URL for the type of database you are using.
+# scalar.db.contact_points=jdbc:mysql://localhost:3306/
+# scalar.db.contact_points=jdbc:oracle:thin:@//localhost:1521/
+# scalar.db.contact_points=jdbc:postgresql://localhost:5432/
+# scalar.db.contact_points=jdbc:sqlserver://localhost:1433;
+# scalar.db.contact_points=jdbc:sqlite:<SQLITE_DB_FILE_PATH>.sqlite3?busy_timeout=10000
+
+# The username and password for connecting to the database.
+scalar.db.username=
+scalar.db.password=
+```
+
+## Create and load the database schema
+
+You need to define the database schema (the method in which the data will be organized) in the application. For details about the supported data types, see [Data type mapping between ScalarDB and other databases](schema-loader.md#data-type-mapping-between-scalardb-and-the-other-databases).
+
+For this tutorial, create a file named **emoney.json** in the `scalardb/docs/getting-started` directory. Then, add the following JSON code to define the schema.
+
+```json
+{
+ "emoney.account": {
+ "transaction": true,
+ "partition-key": [
+ "id"
+ ],
+ "clustering-key": [],
+ "columns": {
+ "id": "TEXT",
+ "balance": "INT"
+ }
+ }
+}
+```
+
+To apply the schema, go to the [`scalardb` Releases](https://github.com/scalar-labs/scalardb/releases) page and download the ScalarDB Schema Loader that matches the version of ScalarDB that you are using to the `getting-started` folder.
+
+Then, run the following command, replacing `<VERSION>` with the version of the ScalarDB Schema Loader that you downloaded:
+
+```shell
+$ java -jar scalardb-schema-loader-<VERSION>.jar --config scalardb.properties --schema-file emoney.json --coordinator
+```
+
+{% capture notice--info %}
+**Note**
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.md).
+{% endcapture %}
+
+
+{{ notice--info | markdownify }}
+
+## Execute transactions and retrieve data in the basic electronic money application
+
+After loading the schema, you can execute transactions and retrieve data in the basic electronic money application that is included in the repository that you cloned.
+
+The application supports the following types of transactions:
+
+- Create an account.
+- Add funds to an account.
+- Send funds between two accounts.
+- Get an account balance.
+
+{% capture notice--info %}
+**Note**
+
+When you first execute a Gradle command, Gradle will automatically install the necessary libraries.
+{% endcapture %}
+
+
+{{ notice--info | markdownify }}
+
+### Create an account with a balance
+
+You need an account with a balance so that you can send funds between accounts.
+
+To create an account for **customer1** that has a balance of **500**, run the following command:
+
+```shell
+$ ./gradlew run --args="-action charge -amount 500 -to customer1"
+```
+
+### Create an account without a balance
+
+After setting up an account that has a balance, you need another account for sending funds to.
+
+To create an account for **merchant1** that has a balance of **0**, run the following command:
+
+```shell
+$ ./gradlew run --args="-action charge -amount 0 -to merchant1"
+```
+
+### Add funds to an account
+
+You can add funds to an account in the same way that you created and added funds to an account in [Create an account with a balance](#create-an-account-with-a-balance).
+
+To add **500** to the account for **customer1**, run the following command:
+
+```shell
+$ ./gradlew run --args="-action charge -amount 500 -to customer1"
+```
+
+The account for **customer1** will now have a balance of **1000**.
+
+### Send electronic money between two accounts
+
+Now that you have created two accounts, with at least one of those accounts having a balance, you can send funds from one account to the other account.
+
+To have **customer1** pay **100** to **merchant1**, run the following command:
+
+```shell
+$ ./gradlew run --args="-action pay -amount 100 -from customer1 -to merchant1"
+```
+
+### Get an account balance
+
+After sending funds from one account to the other, you can check the balance of each account.
+
+To get the balance of **customer1**, run the following command:
+
+```shell
+$ ./gradlew run --args="-action getBalance -id customer1"
+```
+
+You should see the following output:
+
+```shell
+...
+The balance for customer1 is 900
+...
+```
+
+To get the balance of **merchant1**, run the following command:
+
+```shell
+$ ./gradlew run --args="-action getBalance -id merchant1"
+```
+
+You should see the following output:
+
+```shell
+...
+The balance for merchant1 is 100
+...
+```
+
+## Reference
+
+To see the source code for the electronic money application used in this tutorial, see [`ElectronicMoney.java`](./getting-started/src/main/java/sample/ElectronicMoney.java).
diff --git a/docs/3.12/getting-started/build.gradle b/docs/3.12/getting-started/build.gradle
new file mode 100644
index 00000000..7bd82d12
--- /dev/null
+++ b/docs/3.12/getting-started/build.gradle
@@ -0,0 +1,17 @@
+apply plugin: 'java'
+apply plugin: 'idea'
+apply plugin: 'application'
+
+repositories {
+ mavenCentral()
+}
+
+mainClassName = "sample.ElectronicMoneyMain"
+
+dependencies {
+ implementation 'com.scalar-labs:scalardb:3.12.0'
+ implementation 'org.slf4j:slf4j-simple:1.7.30'
+}
+
+sourceCompatibility = 1.8
+targetCompatibility = 1.8
diff --git a/docs/3.12/getting-started/build/classes/java/main/sample/ElectronicMoney.class b/docs/3.12/getting-started/build/classes/java/main/sample/ElectronicMoney.class
new file mode 100644
index 00000000..207c9113
Binary files /dev/null and b/docs/3.12/getting-started/build/classes/java/main/sample/ElectronicMoney.class differ
diff --git a/docs/3.12/getting-started/build/classes/java/main/sample/ElectronicMoneyMain.class b/docs/3.12/getting-started/build/classes/java/main/sample/ElectronicMoneyMain.class
new file mode 100644
index 00000000..e47cf1c9
Binary files /dev/null and b/docs/3.12/getting-started/build/classes/java/main/sample/ElectronicMoneyMain.class differ
diff --git a/docs/3.12/getting-started/build/tmp/compileJava/previous-compilation-data.bin b/docs/3.12/getting-started/build/tmp/compileJava/previous-compilation-data.bin
new file mode 100644
index 00000000..1faadeb3
Binary files /dev/null and b/docs/3.12/getting-started/build/tmp/compileJava/previous-compilation-data.bin differ
diff --git a/docs/3.12/getting-started/gradle/wrapper/gradle-wrapper.jar b/docs/3.12/getting-started/gradle/wrapper/gradle-wrapper.jar
new file mode 100644
index 00000000..7454180f
Binary files /dev/null and b/docs/3.12/getting-started/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/docs/3.12/getting-started/gradle/wrapper/gradle-wrapper.properties b/docs/3.12/getting-started/gradle/wrapper/gradle-wrapper.properties
new file mode 100644
index 00000000..070cb702
--- /dev/null
+++ b/docs/3.12/getting-started/gradle/wrapper/gradle-wrapper.properties
@@ -0,0 +1,5 @@
+distributionBase=GRADLE_USER_HOME
+distributionPath=wrapper/dists
+distributionUrl=https\://services.gradle.org/distributions/gradle-7.6-bin.zip
+zipStoreBase=GRADLE_USER_HOME
+zipStorePath=wrapper/dists
diff --git a/docs/3.12/getting-started/gradlew b/docs/3.12/getting-started/gradlew
new file mode 100755
index 00000000..744e882e
--- /dev/null
+++ b/docs/3.12/getting-started/gradlew
@@ -0,0 +1,185 @@
+#!/usr/bin/env sh
+
+#
+# Copyright 2015 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+##############################################################################
+##
+## Gradle start up script for UN*X
+##
+##############################################################################
+
+# Attempt to set APP_HOME
+# Resolve links: $0 may be a link
+PRG="$0"
+# Need this for relative symlinks.
+while [ -h "$PRG" ] ; do
+ ls=`ls -ld "$PRG"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG=`dirname "$PRG"`"/$link"
+ fi
+done
+SAVED="`pwd`"
+cd "`dirname \"$PRG\"`/" >/dev/null
+APP_HOME="`pwd -P`"
+cd "$SAVED" >/dev/null
+
+APP_NAME="Gradle"
+APP_BASE_NAME=`basename "$0"`
+
+# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
+
+# Use the maximum available, or set MAX_FD != -1 to use that value.
+MAX_FD="maximum"
+
+warn () {
+ echo "$*"
+}
+
+die () {
+ echo
+ echo "$*"
+ echo
+ exit 1
+}
+
+# OS specific support (must be 'true' or 'false').
+cygwin=false
+msys=false
+darwin=false
+nonstop=false
+case "`uname`" in
+ CYGWIN* )
+ cygwin=true
+ ;;
+ Darwin* )
+ darwin=true
+ ;;
+ MSYS* | MINGW* )
+ msys=true
+ ;;
+ NONSTOP* )
+ nonstop=true
+ ;;
+esac
+
+CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
+
+
+# Determine the Java command to use to start the JVM.
+if [ -n "$JAVA_HOME" ] ; then
+ if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+ # IBM's JDK on AIX uses strange locations for the executables
+ JAVACMD="$JAVA_HOME/jre/sh/java"
+ else
+ JAVACMD="$JAVA_HOME/bin/java"
+ fi
+ if [ ! -x "$JAVACMD" ] ; then
+ die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+ fi
+else
+ JAVACMD="java"
+ which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+fi
+
+# Increase the maximum file descriptors if we can.
+if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
+ MAX_FD_LIMIT=`ulimit -H -n`
+ if [ $? -eq 0 ] ; then
+ if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
+ MAX_FD="$MAX_FD_LIMIT"
+ fi
+ ulimit -n $MAX_FD
+ if [ $? -ne 0 ] ; then
+ warn "Could not set maximum file descriptor limit: $MAX_FD"
+ fi
+ else
+ warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
+ fi
+fi
+
+# For Darwin, add options to specify how the application appears in the dock
+if $darwin; then
+ GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
+fi
+
+# For Cygwin or MSYS, switch paths to Windows format before running java
+if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then
+ APP_HOME=`cygpath --path --mixed "$APP_HOME"`
+ CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
+
+ JAVACMD=`cygpath --unix "$JAVACMD"`
+
+ # We build the pattern for arguments to be converted via cygpath
+ ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
+ SEP=""
+ for dir in $ROOTDIRSRAW ; do
+ ROOTDIRS="$ROOTDIRS$SEP$dir"
+ SEP="|"
+ done
+ OURCYGPATTERN="(^($ROOTDIRS))"
+ # Add a user-defined pattern to the cygpath arguments
+ if [ "$GRADLE_CYGPATTERN" != "" ] ; then
+ OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
+ fi
+ # Now convert the arguments - kludge to limit ourselves to /bin/sh
+ i=0
+ for arg in "$@" ; do
+ CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
+ CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
+
+ if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
+ eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
+ else
+ eval `echo args$i`="\"$arg\""
+ fi
+ i=`expr $i + 1`
+ done
+ case $i in
+ 0) set -- ;;
+ 1) set -- "$args0" ;;
+ 2) set -- "$args0" "$args1" ;;
+ 3) set -- "$args0" "$args1" "$args2" ;;
+ 4) set -- "$args0" "$args1" "$args2" "$args3" ;;
+ 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
+ 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
+ 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
+ 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
+ 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
+ esac
+fi
+
+# Escape application args
+save () {
+ for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
+ echo " "
+}
+APP_ARGS=`save "$@"`
+
+# Collect all arguments for the java command, following the shell quoting and substitution rules
+eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
+
+exec "$JAVACMD" "$@"
diff --git a/docs/3.12/getting-started/gradlew.bat b/docs/3.12/getting-started/gradlew.bat
new file mode 100644
index 00000000..107acd32
--- /dev/null
+++ b/docs/3.12/getting-started/gradlew.bat
@@ -0,0 +1,89 @@
+@rem
+@rem Copyright 2015 the original author or authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem https://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+
+@if "%DEBUG%" == "" @echo off
+@rem ##########################################################################
+@rem
+@rem Gradle startup script for Windows
+@rem
+@rem ##########################################################################
+
+@rem Set local scope for the variables with windows NT shell
+if "%OS%"=="Windows_NT" setlocal
+
+set DIRNAME=%~dp0
+if "%DIRNAME%" == "" set DIRNAME=.
+set APP_BASE_NAME=%~n0
+set APP_HOME=%DIRNAME%
+
+@rem Resolve any "." and ".." in APP_HOME to make it shorter.
+for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
+
+@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
+
+@rem Find java.exe
+if defined JAVA_HOME goto findJavaFromJavaHome
+
+set JAVA_EXE=java.exe
+%JAVA_EXE% -version >NUL 2>&1
+if "%ERRORLEVEL%" == "0" goto execute
+
+echo.
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto execute
+
+echo.
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:execute
+@rem Setup the command line
+
+set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+
+
+@rem Execute Gradle
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
+
+:end
+@rem End local scope for the variables with windows NT shell
+if "%ERRORLEVEL%"=="0" goto mainEnd
+
+:fail
+rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
+rem the _cmd.exe /c_ return code!
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
+exit /b 1
+
+:mainEnd
+if "%OS%"=="Windows_NT" endlocal
+
+:omega
diff --git a/docs/3.12/getting-started/scalardb-schema-loader-3.10.1.jar b/docs/3.12/getting-started/scalardb-schema-loader-3.10.1.jar
new file mode 100644
index 00000000..581fc7ba
Binary files /dev/null and b/docs/3.12/getting-started/scalardb-schema-loader-3.10.1.jar differ
diff --git a/docs/3.12/getting-started/scalardb.properties b/docs/3.12/getting-started/scalardb.properties
new file mode 100755
index 00000000..b1dbcbde
--- /dev/null
+++ b/docs/3.12/getting-started/scalardb.properties
@@ -0,0 +1,12 @@
+# Comma-separated contact points
+scalar.db.contact_points=localhost
+
+# Port number for all the contact points. Default port number for each database is used if empty.
+#scalar.db.contact_port=
+
+# Credential information to access the database
+scalar.db.username=cassandra
+scalar.db.password=cassandra
+
+# Storage implementation. Either cassandra or cosmos or dynamo or jdbc can be set. Default storage is cassandra.
+#scalar.db.storage=cassandra
diff --git a/docs/3.12/getting-started/settings.gradle b/docs/3.12/getting-started/settings.gradle
new file mode 100644
index 00000000..744e2a3e
--- /dev/null
+++ b/docs/3.12/getting-started/settings.gradle
@@ -0,0 +1 @@
+rootProject.name = 'getting-started'
diff --git a/docs/3.12/getting-started/src/main/java/sample/ElectronicMoney.java b/docs/3.12/getting-started/src/main/java/sample/ElectronicMoney.java
new file mode 100644
index 00000000..2af60ca2
--- /dev/null
+++ b/docs/3.12/getting-started/src/main/java/sample/ElectronicMoney.java
@@ -0,0 +1,153 @@
+package sample;
+
+import com.scalar.db.api.DistributedTransaction;
+import com.scalar.db.api.DistributedTransactionManager;
+import com.scalar.db.api.Get;
+import com.scalar.db.api.Put;
+import com.scalar.db.api.Result;
+import com.scalar.db.exception.transaction.TransactionException;
+import com.scalar.db.io.Key;
+import com.scalar.db.service.TransactionFactory;
+import java.io.IOException;
+import java.util.Optional;
+
+public class ElectronicMoney {
+
+ private static final String NAMESPACE = "emoney";
+ private static final String TABLENAME = "account";
+ private static final String ID = "id";
+ private static final String BALANCE = "balance";
+
+ private final DistributedTransactionManager manager;
+
+ public ElectronicMoney(String scalarDBProperties) throws IOException {
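+    // Create a transaction manager through a transaction factory initialized with the given properties file.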
+ TransactionFactory factory = TransactionFactory.create(scalarDBProperties);
+ manager = factory.getTransactionManager();
+ }
+
+ public void charge(String id, int amount) throws TransactionException {
+ // Start a transaction
+ DistributedTransaction tx = manager.start();
+
+ try {
+ // Retrieve the current balance for id
+ Get get =
+ Get.newBuilder()
+ .namespace(NAMESPACE)
+ .table(TABLENAME)
+ .partitionKey(Key.ofText(ID, id))
+ .build();
+      Optional<Result> result = tx.get(get);
+
+ // Calculate the balance
+ int balance = amount;
+ if (result.isPresent()) {
+ int current = result.get().getInt(BALANCE);
+ balance += current;
+ }
+
+ // Update the balance
+ Put put =
+ Put.newBuilder()
+ .namespace(NAMESPACE)
+ .table(TABLENAME)
+ .partitionKey(Key.ofText(ID, id))
+ .intValue(BALANCE, balance)
+ .build();
+ tx.put(put);
+
+ // Commit the transaction (records are automatically recovered in case of failure)
+ tx.commit();
+ } catch (Exception e) {
+ tx.abort();
+ throw e;
+ }
+ }
+
+ public void pay(String fromId, String toId, int amount) throws TransactionException {
+ // Start a transaction
+ DistributedTransaction tx = manager.start();
+
+ try {
+ // Retrieve the current balances for ids
+ Get fromGet =
+ Get.newBuilder()
+ .namespace(NAMESPACE)
+ .table(TABLENAME)
+ .partitionKey(Key.ofText(ID, fromId))
+ .build();
+ Get toGet =
+ Get.newBuilder()
+ .namespace(NAMESPACE)
+ .table(TABLENAME)
+ .partitionKey(Key.ofText(ID, toId))
+ .build();
+      Optional<Result> fromResult = tx.get(fromGet);
+      Optional<Result> toResult = tx.get(toGet);
+
+ // Calculate the balances (it assumes that both accounts exist)
+ int newFromBalance = fromResult.get().getInt(BALANCE) - amount;
+ int newToBalance = toResult.get().getInt(BALANCE) + amount;
+ if (newFromBalance < 0) {
+ throw new RuntimeException(fromId + " doesn't have enough balance.");
+ }
+
+ // Update the balances
+ Put fromPut =
+ Put.newBuilder()
+ .namespace(NAMESPACE)
+ .table(TABLENAME)
+ .partitionKey(Key.ofText(ID, fromId))
+ .intValue(BALANCE, newFromBalance)
+ .build();
+ Put toPut =
+ Put.newBuilder()
+ .namespace(NAMESPACE)
+ .table(TABLENAME)
+ .partitionKey(Key.ofText(ID, toId))
+ .intValue(BALANCE, newToBalance)
+ .build();
+ tx.put(fromPut);
+ tx.put(toPut);
+
+ // Commit the transaction (records are automatically recovered in case of failure)
+ tx.commit();
+ } catch (Exception e) {
+ tx.abort();
+ throw e;
+ }
+ }
+
+ public int getBalance(String id) throws TransactionException {
+ // Start a transaction
+ DistributedTransaction tx = manager.start();
+
+ try {
+ // Retrieve the current balances for id
+ Get get =
+ Get.newBuilder()
+ .namespace(NAMESPACE)
+ .table(TABLENAME)
+ .partitionKey(Key.ofText(ID, id))
+ .build();
+      Optional<Result> result = tx.get(get);
+
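+      // Use -1 as the balance if the account doesn't exist.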
+ int balance = -1;
+ if (result.isPresent()) {
+ balance = result.get().getInt(BALANCE);
+ }
+
+ // Commit the transaction
+ tx.commit();
+
+ return balance;
+ } catch (Exception e) {
+ tx.abort();
+ throw e;
+ }
+ }
+
+ public void close() {
+ manager.close();
+ }
+}
diff --git a/docs/3.12/getting-started/src/main/java/sample/ElectronicMoneyMain.java b/docs/3.12/getting-started/src/main/java/sample/ElectronicMoneyMain.java
new file mode 100644
index 00000000..53348744
--- /dev/null
+++ b/docs/3.12/getting-started/src/main/java/sample/ElectronicMoneyMain.java
@@ -0,0 +1,75 @@
+package sample;
+
+import java.io.File;
+
+public class ElectronicMoneyMain {
+
+ public static void main(String[] args) throws Exception {
+ String action = null;
+ int amount = 0;
+ String to = null;
+ String from = null;
+ String id = null;
+ String scalarDBProperties = null;
+
+ for (int i = 0; i < args.length; ++i) {
+ if ("-action".equals(args[i])) {
+ action = args[++i];
+ } else if ("-amount".equals(args[i])) {
+ amount = Integer.parseInt(args[++i]);
+ } else if ("-to".equals(args[i])) {
+ to = args[++i];
+ } else if ("-from".equals(args[i])) {
+ from = args[++i];
+ } else if ("-id".equals(args[i])) {
+ id = args[++i];
+ } else if ("-config".equals(args[i])) {
+ scalarDBProperties = args[++i];
+ } else if ("-help".equals(args[i])) {
+ printUsageAndExit();
+ return;
+ }
+ }
+
+ if (action == null) {
+ printUsageAndExit();
+ return;
+ }
+
+ ElectronicMoney eMoney;
+ if (scalarDBProperties != null) {
+ eMoney = new ElectronicMoney(scalarDBProperties);
+ } else {
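+      // No -config flag was given: fall back to scalardb.properties in the current working directory.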
+ scalarDBProperties = System.getProperty("user.dir") + File.separator + "scalardb.properties";
+ eMoney = new ElectronicMoney(scalarDBProperties);
+ }
+
+ if (action.equalsIgnoreCase("charge")) {
+ if (to == null || amount < 0) {
+ printUsageAndExit();
+ return;
+ }
+ eMoney.charge(to, amount);
+ } else if (action.equalsIgnoreCase("pay")) {
+ if (to == null || amount < 0 || from == null) {
+ printUsageAndExit();
+ return;
+ }
+ eMoney.pay(from, to, amount);
+ } else if (action.equalsIgnoreCase("getBalance")) {
+ if (id == null) {
+ printUsageAndExit();
+ return;
+ }
+ int balance = eMoney.getBalance(id);
+ System.out.println("The balance for " + id + " is " + balance);
+ }
+ eMoney.close();
+ }
+
+ private static void printUsageAndExit() {
+ System.err.println(
+ "ElectronicMoneyMain -action charge/pay/getBalance [-amount number (needed for charge and pay)] [-to id (needed for charge and pay)] [-from id (needed for pay)] [-id id (needed for getBalance)]");
+ System.exit(1);
+ }
+}
diff --git a/docs/3.12/guides.md b/docs/3.12/guides.md
new file mode 100644
index 00000000..be256305
--- /dev/null
+++ b/docs/3.12/guides.md
@@ -0,0 +1,15 @@
+---
+toc: false
+---
+
+# Developer Guides for ScalarDB
+
+The following is a list of developer guides for ScalarDB:
+
+- [ScalarDB Java API Guide](api-guide.md)
+- [ScalarDB JDBC Guide](scalardb-sql/jdbc-guide.md)
+- [ScalarDB SQL API Guide](scalardb-sql/sql-api-guide.md)
+- [Spring Data Integration with ScalarDB Guide](scalardb-sql/spring-data-guide.md)
+- [Developer Guide for ScalarDB Cluster with the Java API](scalardb-cluster/developer-guide-for-scalardb-cluster-with-java-api.md)
+- [ScalarDB Cluster gRPC API Guide](scalardb-cluster/scalardb-cluster-grpc-api-guide.md)
+- [ScalarDB Cluster SQL gRPC API Guide](scalardb-cluster/scalardb-cluster-sql-grpc-api-guide.md)
diff --git a/docs/3.12/helm-charts/README.md b/docs/3.12/helm-charts/README.md
new file mode 100644
index 00000000..2bc4da97
--- /dev/null
+++ b/docs/3.12/helm-charts/README.md
@@ -0,0 +1,9 @@
+# Index
+
+## For users
+* [Getting Started with Scalar Helm Charts](./getting-started-scalar-helm-charts.md)
+* [Configure a custom values file for Scalar Helm Chart](./configure-custom-values-file.md)
+* [Deploy Scalar products using Scalar Helm Charts](./how-to-deploy-scalar-products.md)
+
+## For developers
+* [Release Flow](./ReleaseFlow.md)
diff --git a/docs/3.12/helm-charts/conf/scalar-loki-stack-custom-values.yaml b/docs/3.12/helm-charts/conf/scalar-loki-stack-custom-values.yaml
new file mode 100644
index 00000000..f078536a
--- /dev/null
+++ b/docs/3.12/helm-charts/conf/scalar-loki-stack-custom-values.yaml
@@ -0,0 +1,55 @@
+promtail:
+ config:
+ snippets:
+      # -- `scrapeConfigs` is exactly the same as the `scrape_configs` section described at https://grafana.com/docs/loki/latest/clients/promtail/configuration/#scrape_configs
+      # -- The value will be created as a Kubernetes ConfigMap and then mounted to the Promtail Pod.
+      # -- You don't usually need to change this value. It's set to scrape all the logs of ScalarDL/DB Pods by using regular expressions.
+ scrapeConfigs: |
+ # -- the `scalardl` job scrapes all the logs from Scalar Ledger Pods, Scalar Auditor Pods, and the corresponding Envoy Pods
+ - job_name: scalardl
+ pipeline_stages:
+ - docker: {}
+ kubernetes_sd_configs:
+ - role: pod
+ relabel_configs:
+ - source_labels:
+ - __meta_kubernetes_pod_node_name
+ target_label: __host__
+ - action: replace
+ source_labels:
+ - __meta_kubernetes_pod_name
+ target_label: pod
+ - action: keep
+ regex: (.*)scalardl-(.+)
+ source_labels:
+ - pod
+ - replacement: /var/log/pods/*$1/*.log
+ separator: /
+ source_labels:
+ - __meta_kubernetes_pod_uid
+ - __meta_kubernetes_pod_container_name
+ target_label: __path__
+ # -- the `scalardb` job scrapes all the logs of ScalarDB Server Pods and the corresponding Envoy Pods
+ - job_name: scalardb
+ pipeline_stages:
+ - docker: {}
+ kubernetes_sd_configs:
+ - role: pod
+ relabel_configs:
+ - source_labels:
+ - __meta_kubernetes_pod_node_name
+ target_label: __host__
+ - action: replace
+ source_labels:
+ - __meta_kubernetes_pod_name
+ target_label: pod
+ - action: keep
+ regex: (.*)scalardb-(.+)
+ source_labels:
+ - pod
+ - replacement: /var/log/pods/*$1/*.log
+ separator: /
+ source_labels:
+ - __meta_kubernetes_pod_uid
+ - __meta_kubernetes_pod_container_name
+ target_label: __path__
diff --git a/docs/3.12/helm-charts/conf/scalar-manager-custom-values.yaml b/docs/3.12/helm-charts/conf/scalar-manager-custom-values.yaml
new file mode 100644
index 00000000..7d19a4d3
--- /dev/null
+++ b/docs/3.12/helm-charts/conf/scalar-manager-custom-values.yaml
@@ -0,0 +1,12 @@
+scalarManager:
+ # targets -- The targets that Scalar Manager should monitor
+ targets: # example
+ - name: Ledgers
+ adminSrv: _scalardl-admin._tcp.scalardl-headless.default.svc.cluster.local
+ databaseType: cassandra
+
+  # grafanaUrl -- The URL where Grafana can be accessed publicly
+ grafanaUrl: "http://localhost:3000"
+
+  # refreshInterval -- The interval at which Scalar Manager refreshes the status of the monitoring targets
+ refreshInterval: 30
diff --git a/docs/3.12/helm-charts/conf/scalar-prometheus-custom-values.yaml b/docs/3.12/helm-charts/conf/scalar-prometheus-custom-values.yaml
new file mode 100644
index 00000000..816ead1b
--- /dev/null
+++ b/docs/3.12/helm-charts/conf/scalar-prometheus-custom-values.yaml
@@ -0,0 +1,167 @@
+defaultRules:
+ # -- Default PrometheusRules are not enabled
+ create: false
+
+alertmanager:
+ # -- alertmanager is enabled
+ enabled: true
+
+ # -- Only check own namespace
+ alertmanagerConfigNamespaceSelector: null
+
+grafana:
+ # -- grafana is enabled
+ enabled: true
+
+ # -- Default Grafana dashboards are not enabled
+ defaultDashboardsEnabled: false
+
+ sidecar:
+ datasources:
+ enabled: true
+ defaultDatasourceEnabled: false
+ label: grafana_datasource
+ labelValue: "1"
+ dashboards:
+ enabled: true
+ label: grafana_dashboard
+ labelValue: "1"
+ # -- Resource limits & requests
+ resources: {}
+ # requests:
+ # memory: 400Mi
+
+ # -- Grafana's primary configuration
+ grafana.ini:
+ security:
+ # -- allow Grafana to be embedded (not set the X-Frame-Options header)
+ # -- If you use Scalar Manager, you need to set allow_embedding to true.
+ # -- https://grafana.com/docs/grafana/latest/administration/configuration/#allow_embedding
+ allow_embedding: false
+
+ # -- Additional data source configurations
+ additionalDataSources:
+ - name: Prometheus
+ type: prometheus
+ uid: prometheus
+ url: http://scalar-monitoring-kube-pro-prometheus:9090/
+ access: proxy
+ editable: false
+ isDefault: false
+ jsonData:
+ timeInterval: 30s
+ # - name: Loki
+ # type: loki
+ # uid: loki
+ # url: http://scalar-logging-loki:3100/
+ # access: proxy
+ # editable: false
+ # isDefault: false
+
+kubeApiServer:
+ # -- Scraping kube-apiserver is disabled
+ enabled: false
+
+kubeControllerManager:
+ # -- Scraping kube-controller-manager is disabled
+ enabled: false
+
+coreDns:
+ # -- Scraping CoreDNS is disabled
+ enabled: false
+
+kubeEtcd:
+ # -- Scraping etcd is disabled
+ enabled: false
+
+kubeScheduler:
+ # -- Scraping kube-scheduler is disabled
+ enabled: false
+
+kubeProxy:
+ # -- Scraping kube-proxy is disabled
+ enabled: false
+
+kubelet:
+ # -- Scraping kubelet is disabled
+ enabled: false
+
+kubeStateMetrics:
+ # -- kube-state-metrics is disabled
+ enabled: false
+
+nodeExporter:
+ # -- node-exporter is disabled
+ enabled: false
+
+prometheusOperator:
+ # -- Prometheus Operator is enabled
+ enabled: true
+
+ admissionWebhooks:
+ patch:
+ # -- Resource limits & requests
+ resources: {}
+ # requests:
+ # memory: 400Mi
+
+ namespaces:
+ # -- Only check own namespace
+ releaseNamespace: true
+
+ kubeletService:
+ # -- kubelet service for scraping kubelets is disabled
+ enabled: false
+
+ ## -- Resource limits & requests
+ resources: {}
+ # requests:
+ # memory: 400Mi
+
+prometheus:
+ # -- Prometheus is enabled
+ enabled: true
+
+ prometheusSpec:
+ # -- All PrometheusRules are enabled
+ ruleSelectorNilUsesHelmValues: false
+
+ # -- Only check own namespace
+ ruleNamespaceSelector: {}
+
+ # -- All ServiceMonitors are enabled
+ serviceMonitorSelectorNilUsesHelmValues: false
+
+ # -- Only check own namespace
+ serviceMonitorNamespaceSelector: {}
+
+ # -- All PodMonitors are enabled
+ podMonitorSelectorNilUsesHelmValues: false
+
+ # -- Only check own namespace
+ podMonitorNamespaceSelector: {}
+
+ # -- All Probes are enabled
+ probeSelectorNilUsesHelmValues: false
+
+ # -- Only check own namespace
+ probeNamespaceSelector: {}
+
+ ## -- Resource limits & requests
+ resources: {}
+ # requests:
+ # memory: 400Mi
+
+ ## -- Prometheus StorageSpec for persistent data
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md
+ storageSpec: {}
+ ## Using PersistentVolumeClaim
+ ##
+ # volumeClaimTemplate:
+ # spec:
+ # storageClassName: gluster
+ # accessModes: ["ReadWriteOnce"]
+ # resources:
+ # requests:
+ # storage: 50Gi
+ # selector: {}
diff --git a/docs/3.12/helm-charts/configure-custom-values-envoy.md b/docs/3.12/helm-charts/configure-custom-values-envoy.md
new file mode 100644
index 00000000..07660951
--- /dev/null
+++ b/docs/3.12/helm-charts/configure-custom-values-envoy.md
@@ -0,0 +1,174 @@
+# Configure a custom values file for Scalar Envoy
+
+This document explains how to create your custom values file for the Scalar Envoy chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/envoy/README.md) of the Scalar Envoy chart.
+
+## Configure custom values for Scalar Envoy chart
+
+The Scalar Envoy chart is used via other charts (scalardb, scalardb-cluster, scalardl, and scalardl-audit), so you don't need to create a custom values file for the Scalar Envoy chart. If you want to configure Scalar Envoy, you need to add the `envoy.*` configuration to the other charts.
+
+For example, if you want to configure Scalar Envoy for ScalarDB Server, you can set the Scalar Envoy configurations in the custom values file of ScalarDB as follows.
+
+* Example (scalardb-custom-values.yaml)
+ ```yaml
+ envoy:
+ configurationsForScalarEnvoy:
+ ...
+
+ scalardb:
+ configurationsForScalarDB:
+ ...
+ ```
+
+## Required configurations
+
+### Service configurations
+
+You must set `envoy.service.type` to specify the Service resource type of Kubernetes.
+
+If you accept client requests from inside of the Kubernetes cluster only (for example, if you deploy your client applications on the same Kubernetes cluster as Scalar products), you can set `envoy.service.type` to `ClusterIP`. This configuration doesn't create any load balancers provided by cloud service providers.
+
+```yaml
+envoy:
+ service:
+ type: ClusterIP
+```
+
+If you want to use a load balancer provided by a cloud service provider to accept client requests from outside of the Kubernetes cluster, you need to set `envoy.service.type` to `LoadBalancer`.
+
+```yaml
+envoy:
+ service:
+ type: LoadBalancer
+```
+
+If you want to configure the load balancer via annotations, you can also set annotations to `envoy.service.annotations`.
+
+```yaml
+envoy:
+ service:
+ type: LoadBalancer
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+ service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
+```
+
+## Optional configurations
+
+### Resource configurations (Recommended in the production environment)
+
+If you want to control pod resources using the requests and limits of Kubernetes, you can use `envoy.resources`.
+
+You can configure them using the same syntax as the requests and limits of Kubernetes. So, please refer to the official document [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details on the requests and limits of Kubernetes.
+
+```yaml
+envoy:
+ resources:
+ requests:
+ cpu: 1000m
+ memory: 2Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+### Affinity configurations (Recommended in the production environment)
+
+If you want to control pod deployment using the affinity and anti-affinity of Kubernetes, you can use `envoy.affinity`.
+
+You can configure them using the same syntax as the affinity of Kubernetes. So, please refer to the official document [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more details on the affinity configuration of Kubernetes.
+
+```yaml
+envoy:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/name
+ operator: In
+ values:
+ - scalardb-cluster
+ - key: app.kubernetes.io/app
+ operator: In
+ values:
+ - envoy
+ topologyKey: kubernetes.io/hostname
+ weight: 50
+```
+
+### Prometheus and Grafana configurations (Recommended in production environments)
+
+If you want to monitor Scalar Envoy pods using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can deploy a ConfigMap, a ServiceMonitor, and a PrometheusRule resource for kube-prometheus-stack using `envoy.grafanaDashboard.enabled`, `envoy.serviceMonitor.enabled`, and `envoy.prometheusRule.enabled`.
+
+```yaml
+envoy:
+ grafanaDashboard:
+ enabled: true
+ namespace: monitoring
+ serviceMonitor:
+ enabled: true
+ namespace: monitoring
+ interval: 15s
+ prometheusRule:
+ enabled: true
+ namespace: monitoring
+```
+
+### SecurityContext configurations (Default value is recommended)
+
+If you want to set SecurityContext and PodSecurityContext for Scalar Envoy pods, you can use `envoy.securityContext` and `envoy.podSecurityContext`.
+
+You can configure them using the same syntax as SecurityContext and PodSecurityContext of Kubernetes. So, please refer to the official document [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for more details on the SecurityContext and PodSecurityContext configurations of Kubernetes.
+
+```yaml
+envoy:
+ podSecurityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+```
+
+### Image configurations (Default value is recommended)
+
+If you want to change the image repository and version, you can use `envoy.image.repository` to specify the container repository information of the Scalar Envoy container image that you want to pull.
+
+```yaml
+envoy:
+ image:
+    repository: <SCALAR_ENVOY_CONTAINER_IMAGE>
+```
+
+If you're using AWS or Azure, please refer to the following documents for more details:
+
+* [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md)
+* [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md)
+
+### Replica configurations (Optional based on your environment)
+
+You can specify the number of replicas (pods) of Scalar Envoy using `envoy.replicaCount`.
+
+```yaml
+envoy:
+ replicaCount: 3
+```
+
+### Taint and toleration configurations (Optional based on your environment)
+
+If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `envoy.tolerations`.
+
+You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+```yaml
+envoy:
+ tolerations:
+ - effect: NoSchedule
+ key: scalar-labs.com/dedicated-node
+ operator: Equal
+ value: scalardb
+```
diff --git a/docs/3.12/helm-charts/configure-custom-values-file.md b/docs/3.12/helm-charts/configure-custom-values-file.md
new file mode 100644
index 00000000..2290933a
--- /dev/null
+++ b/docs/3.12/helm-charts/configure-custom-values-file.md
@@ -0,0 +1,14 @@
+# Configure a custom values file for Scalar Helm Charts
+
+When you deploy Scalar products using Scalar Helm Charts, you must prepare your custom values file based on your environment. Please refer to the following documents for more details on how to create a custom values file for each product.
+
+* [ScalarDB Cluster](./configure-custom-values-scalardb-cluster.md)
+* [ScalarDB Analytics with PostgreSQL](./configure-custom-values-scalardb-analytics-postgresql.md)
+* [ScalarDL Ledger](./configure-custom-values-scalardl-ledger.md)
+* [ScalarDL Auditor](./configure-custom-values-scalardl-auditor.md)
+* [ScalarDL Schema Loader](./configure-custom-values-scalardl-schema-loader.md)
+* [Scalar Admin for Kubernetes](./configure-custom-values-scalar-admin-for-kubernetes.md)
+* [Scalar Manager](./configure-custom-values-scalar-manager.md)
+* [Envoy](./configure-custom-values-envoy.md)
+* [[Deprecated] ScalarDB Server](./configure-custom-values-scalardb.md)
+* [[Deprecated] ScalarDB GraphQL](./configure-custom-values-scalardb-graphql.md)
diff --git a/docs/3.12/helm-charts/configure-custom-values-scalar-admin-for-kubernetes.md b/docs/3.12/helm-charts/configure-custom-values-scalar-admin-for-kubernetes.md
new file mode 100644
index 00000000..39b0ef04
--- /dev/null
+++ b/docs/3.12/helm-charts/configure-custom-values-scalar-admin-for-kubernetes.md
@@ -0,0 +1,101 @@
+# Configure a custom values file for Scalar Admin for Kubernetes
+
+This document explains how to create your custom values file for the Scalar Admin for Kubernetes chart. For details on the parameters, see the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalar-admin-for-kubernetes/README.md) of the Scalar Admin for Kubernetes chart.
+
+## Required configurations
+
+This section explains the required configurations when setting up a custom values file for Scalar Admin for Kubernetes.
+
+### Flag configurations
+
+You must specify several flags in `scalarAdminForKubernetes.commandArgs` as an array to run Scalar Admin for Kubernetes. For more details on the flags, see the [README](https://github.com/scalar-labs/scalar-admin-for-kubernetes/blob/main/README.md) of Scalar Admin for Kubernetes. In the following example, the angle-bracket values are placeholders that you must replace with your Helm release name, namespace, pause duration, and time zone.
+
+```yaml
+scalarAdminForKubernetes:
+ commandArgs:
+    - -r
+    - <HELM_RELEASE_NAME>
+    - -n
+    - <NAMESPACE>
+    - -d
+    - <PAUSE_DURATION>
+    - -z
+    - <TIMEZONE>
+```
+
+## Optional configurations
+
+This section explains the optional configurations when setting up a custom values file for Scalar Admin for Kubernetes.
+
+### CronJob configurations (optional based on your environment)
+
+By default, the Scalar Admin for Kubernetes chart creates a [Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) resource to run the Scalar Admin for Kubernetes CLI tool once. If you want to run the Scalar Admin for Kubernetes CLI tool periodically by using [CronJob](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/), you can set `scalarAdminForKubernetes.cronJob.enabled` to `true`. Also, you can set some configurations for the CronJob resource.
+
+```yaml
+scalarAdminForKubernetes:
+ cronJob:
+ enabled: true
+ timeZone: "Etc/UTC"
+ schedule: "0 0 * * *"
+```
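+
+In this example, `"0 0 * * *"` is a standard Cron schedule expression that runs the CLI tool once a day at midnight in the time zone specified by `timeZone` (`Etc/UTC` here).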
+
+### Resource configurations (recommended in production environments)
+
+To control pod resources by using requests and limits in Kubernetes, you can use `scalarAdminForKubernetes.resources`.
+
+You can configure requests and limits by using the same syntax as requests and limits in Kubernetes. For more details on requests and limits in Kubernetes, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
+
+```yaml
+scalarAdminForKubernetes:
+ resources:
+ requests:
+ cpu: 1000m
+ memory: 2Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+### SecurityContext configurations (default value is recommended)
+
+To set SecurityContext and PodSecurityContext for Scalar Admin for Kubernetes pods, you can use `scalarAdminForKubernetes.securityContext` and `scalarAdminForKubernetes.podSecurityContext`.
+
+You can configure SecurityContext and PodSecurityContext by using the same syntax as SecurityContext and PodSecurityContext in Kubernetes. For more details on the SecurityContext and PodSecurityContext configurations in Kubernetes, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/).
+
+```yaml
+scalarAdminForKubernetes:
+ podSecurityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+```
+
+### Image configurations (default value is recommended)
+
+If you want to change the image repository, you can use `scalarAdminForKubernetes.image.repository` to specify the container repository information of the Scalar Admin for Kubernetes image that you want to pull.
+
+```yaml
+scalarAdminForKubernetes:
+ image:
+    repository: <SCALAR_ADMIN_FOR_KUBERNETES_CONTAINER_IMAGE>
+```
+
+### Taint and toleration configurations (optional based on your environment)
+
+If you want to control pod deployment by using taints and tolerations in Kubernetes, you can use `scalarAdminForKubernetes.tolerations`.
+
+You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+```yaml
+scalarAdminForKubernetes:
+ tolerations:
+ - effect: NoSchedule
+ key: scalar-labs.com/dedicated-node
+ operator: Equal
+ value: scalardb-analytics-postgresql
+```
diff --git a/docs/3.12/helm-charts/configure-custom-values-scalar-manager.md b/docs/3.12/helm-charts/configure-custom-values-scalar-manager.md
new file mode 100644
index 00000000..2ceda5b8
--- /dev/null
+++ b/docs/3.12/helm-charts/configure-custom-values-scalar-manager.md
@@ -0,0 +1,66 @@
+# Configure a custom values file for Scalar Manager
+
+This document explains how to create your custom values file for the Scalar Manager chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalar-manager/README.md) of the Scalar Manager chart.
+
+## Required configurations
+
+### Service configurations
+
+You must set `service.type` to specify the Service resource type of Kubernetes. If you want to use a load balancer provided by cloud providers, you need to set `service.type` to `LoadBalancer`.
+
+```yaml
+service:
+ type: LoadBalancer
+```
+
+### Image configurations
+
+You must set `image.repository`. Be sure to specify the Scalar Manager container image so that you can pull the image from the container repository.
+
+```yaml
+image:
+  repository: <SCALAR_MANAGER_CONTAINER_IMAGE>
+```
+
+### Targets configurations
+
+You must set `scalarManager.targets`. Please set the DNS Service URL that returns the SRV records of the pods. Kubernetes creates this URL for the named port of the headless service of the Scalar product. The format is `_{port name}._{protocol}.{service name}.{namespace}.svc.{cluster domain name}`.
+
+```yaml
+scalarManager:
+ targets:
+ - name: Ledger
+ adminSrv: _scalardl-admin._tcp.scalardl-headless.default.svc.cluster.local
+ databaseType: cassandra
+ - name: Auditor
+ adminSrv: _scalardl-auditor-admin._tcp.scalardl-auditor-headless.default.svc.cluster.local
+ databaseType: cassandra
+```
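+
+In the `Ledger` target above, `scalardl-admin` is the port name, `tcp` is the protocol, `scalardl-headless` is the service name, `default` is the namespace, and `cluster.local` is the cluster domain name.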
+
+### Grafana configurations
+
+You must set the `scalarManager.grafanaUrl`. Please specify your Grafana URL.
+
+```yaml
+scalarManager:
+ grafanaUrl: "http://localhost:3000"
+```
+
+## Optional configurations
+
+### Replica configurations (Optional based on your environment)
+
+You can specify the number of replicas (pods) of Scalar Manager using `replicaCount`.
+
+```yaml
+replicaCount: 3
+```
+
+### Refresh interval configurations (Optional based on your environment)
+
+You can specify the interval at which Scalar Manager checks the status of the products by using `scalarManager.refreshInterval`.
+
+```yaml
+scalarManager:
+ refreshInterval: 30
+```
diff --git a/docs/3.12/helm-charts/configure-custom-values-scalardb-analytics-postgresql.md b/docs/3.12/helm-charts/configure-custom-values-scalardb-analytics-postgresql.md
new file mode 100644
index 00000000..91e4ecb0
--- /dev/null
+++ b/docs/3.12/helm-charts/configure-custom-values-scalardb-analytics-postgresql.md
@@ -0,0 +1,185 @@
+# Configure a custom values file for ScalarDB Analytics with PostgreSQL
+
+This document explains how to create your custom values file for the ScalarDB Analytics with PostgreSQL chart. For details on the parameters, see the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardb-analytics-postgresql/README.md) of the ScalarDB Analytics with PostgreSQL chart.
+
+## Required configurations
+
+This section explains the required configurations when setting up a custom values file for ScalarDB Analytics with PostgreSQL.
+
+### Database configurations
+
+To access databases via ScalarDB Analytics with PostgreSQL, you must set the `scalardbAnalyticsPostgreSQL.databaseProperties` parameter by following the same syntax that you use to configure the `database.properties` file. For details about configurations, see [ScalarDB Configurations](https://github.com/scalar-labs/scalardb/blob/master/docs/configurations.md).
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ databaseProperties: |
+ scalar.db.contact_points=localhost
+ scalar.db.username=${env:SCALAR_DB_USERNAME:-}
+ scalar.db.password=${env:SCALAR_DB_PASSWORD:-}
+ scalar.db.storage=cassandra
+```
+
+### Database namespaces configurations
+
+You must set `schemaImporter.namespaces` to all the database namespaces that include tables you want to read via ScalarDB Analytics with PostgreSQL.
+
+```yaml
+schemaImporter:
+ namespaces:
+ - namespace1
+ - namespace2
+ - namespace3
+```
+
+## Optional configurations
+
+This section explains the optional configurations when setting up a custom values file for ScalarDB Analytics with PostgreSQL.
+
+### Resource configurations (recommended in production environments)
+
+To control pod resources by using requests and limits in Kubernetes, you can use `scalardbAnalyticsPostgreSQL.resources`.
+
+You can configure requests and limits by using the same syntax as requests and limits in Kubernetes. For more details on requests and limits in Kubernetes, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ resources:
+ requests:
+ cpu: 1000m
+ memory: 2Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+### Secret configurations (recommended in production environments)
+
+To use environment variables to set some properties, like credentials, in `scalardbAnalyticsPostgreSQL.databaseProperties`, you can use `scalardbAnalyticsPostgreSQL.secretName` to specify the secret resource that includes some credentials.
+
+For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) by using environment variables, which makes your pods more secure.
+
+For more details on how to use a secret resource, see [How to use Secret resources to pass the credentials as the environment variables into the properties file](./use-secret-for-credentials.md).
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ secretName: "scalardb-analytics-postgresql-credentials-secret"
+```
+
+### Affinity configurations (recommended in production environments)
+
+To control pod deployment by using affinity and anti-affinity in Kubernetes, you can use `scalardbAnalyticsPostgreSQL.affinity`.
+
+You can configure affinity and anti-affinity by using the same syntax for affinity and anti-affinity in Kubernetes. For more details on configuring affinity in Kubernetes, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/).
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/name
+ operator: In
+ values:
+ - scalardb-analytics-postgresql
+ - key: app.kubernetes.io/app
+ operator: In
+ values:
+ - scalardb-analytics-postgresql
+ topologyKey: kubernetes.io/hostname
+```
+
+### SecurityContext configurations (default value is recommended)
+
+To set SecurityContext and PodSecurityContext for ScalarDB Analytics with PostgreSQL pods, you can use `scalardbAnalyticsPostgreSQL.securityContext`, `scalardbAnalyticsPostgreSQL.podSecurityContext`, and `schemaImporter.securityContext`.
+
+You can configure SecurityContext and PodSecurityContext by using the same syntax as SecurityContext and PodSecurityContext in Kubernetes. For more details on the SecurityContext and PodSecurityContext configurations in Kubernetes, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/).
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ podSecurityContext:
+ fsGroup: 201
+ seccompProfile:
+ type: RuntimeDefault
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ runAsUser: 999
+ allowPrivilegeEscalation: false
+
+schemaImporter:
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+```
+
+### Image configurations (default value is recommended)
+
+If you want to change the image repository, you can use `scalardbAnalyticsPostgreSQL.image.repository` and `schemaImporter.image.repository` to specify the container repository information of the ScalarDB Analytics with PostgreSQL and Schema Importer images that you want to pull.
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ image:
+    repository: <SCALARDB_ANALYTICS_WITH_POSTGRESQL_CONTAINER_IMAGE>
+
+schemaImporter:
+ image:
+    repository: <SCHEMA_IMPORTER_CONTAINER_IMAGE>
+```
+
+### Replica configurations (optional based on your environment)
+
+You can specify the number of ScalarDB Analytics with PostgreSQL replicas (pods) by using `scalardbAnalyticsPostgreSQL.replicaCount`.
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ replicaCount: 3
+```
+
+### PostgreSQL database name configuration (optional based on your environment)
+
+You can specify the name of the database to create in PostgreSQL. Schema Importer creates objects, such as the views for ScalarDB Analytics with PostgreSQL, in this database.
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ postgresql:
+ databaseName: scalardb
+```
+
+### PostgreSQL superuser password configuration (optional based on your environment)
+
+You can specify the secret name that includes the superuser password for PostgreSQL.
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ postgresql:
+ secretName: scalardb-analytics-postgresql-superuser-password
+```
+
+{% capture notice--info %}
+**Note**
+
+You must create a secret resource with this name (`scalardb-analytics-postgresql-superuser-password` by default) before you deploy ScalarDB Analytics with PostgreSQL. For details, see [Prepare a secret resource](./how-to-deploy-scalardb-analytics-postgresql.md#prepare-a-secret-resource).
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+### Taint and toleration configurations (optional based on your environment)
+
+If you want to control pod deployment by using taints and tolerations in Kubernetes, you can use `scalardbAnalyticsPostgreSQL.tolerations`.
+
+You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+```yaml
+scalardbAnalyticsPostgreSQL:
+ tolerations:
+ - effect: NoSchedule
+ key: scalar-labs.com/dedicated-node
+ operator: Equal
+ value: scalardb-analytics-postgresql
+```
diff --git a/docs/3.12/helm-charts/configure-custom-values-scalardb-cluster.md b/docs/3.12/helm-charts/configure-custom-values-scalardb-cluster.md
new file mode 100644
index 00000000..397d41c2
--- /dev/null
+++ b/docs/3.12/helm-charts/configure-custom-values-scalardb-cluster.md
@@ -0,0 +1,223 @@
+# Configure a custom values file for ScalarDB Cluster
+
+This document explains how to create your custom values file for the ScalarDB Cluster chart. For details on the parameters, see the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardb-cluster/README.md) of the ScalarDB Cluster chart.
+
+## Required configurations
+
+### Image configurations
+
+You must set `scalardbCluster.image.repository`. Be sure to specify the ScalarDB Cluster container image so that you can pull the image from the container repository.
+
+```yaml
+scalardbCluster:
+ image:
+    repository: <SCALARDB_CLUSTER_CONTAINER_IMAGE>
+```
+
+### Database configurations
+
+You must set `scalardbCluster.scalardbClusterNodeProperties`. Please set the contents of `scalardb-cluster-node.properties` in this parameter. For more details on the configurations of ScalarDB Cluster, see [ScalarDB Cluster Configurations](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/scalardb-cluster-configurations.md).
+
+```yaml
+scalardbCluster:
+ scalardbClusterNodeProperties: |
+ scalar.db.cluster.membership.type=KUBERNETES
+ scalar.db.cluster.membership.kubernetes.endpoint.namespace_name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAMESPACE_NAME}
+ scalar.db.cluster.membership.kubernetes.endpoint.name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAME}
+ scalar.db.contact_points=localhost
+ scalar.db.username=${env:SCALAR_DB_USERNAME}
+ scalar.db.password=${env:SCALAR_DB_PASSWORD}
+ scalar.db.storage=cassandra
+```
+
+Note that you must always set the following three properties if you deploy ScalarDB Cluster in a Kubernetes environment by using Scalar Helm Chart. These properties are fixed values. Since the properties don't depend on individual environments, you can set the same values by copying the following values and pasting them in `scalardbCluster.scalardbClusterNodeProperties`.
+
+```yaml
+scalardbCluster:
+ scalardbClusterNodeProperties: |
+ scalar.db.cluster.membership.type=KUBERNETES
+ scalar.db.cluster.membership.kubernetes.endpoint.namespace_name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAMESPACE_NAME}
+ scalar.db.cluster.membership.kubernetes.endpoint.name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAME}
+```
+
+## Optional configurations
+
+### Resource configurations (recommended in production environments)
+
+To control pod resources by using requests and limits in Kubernetes, you can use `scalardbCluster.resources`.
+
+Note that, for commercial licenses, the resources for each pod of Scalar products are limited to 2vCPU / 4GB memory. Also, if you use the pay-as-you-go containers that the AWS Marketplace provides, you will not be able to run any containers that exceed the 2vCPU / 4GB memory configuration in `resources.limits`. If you exceed this resource limitation, the pods will automatically stop.
+
+You can configure requests and limits by using the same syntax as requests and limits in Kubernetes. For more details on requests and limits in Kubernetes, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
+
+```yaml
+scalardbCluster:
+ resources:
+ requests:
+ cpu: 2000m
+ memory: 4Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+### Secret configurations (recommended in production environments)
+
+To use environment variables to set some properties (e.g., credentials) in `scalardbCluster.scalardbClusterNodeProperties`, you can use `scalardbCluster.secretName` to specify the Secret resource that includes some credentials.
+
+For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) by using environment variables, which makes your pods more secure.
+
+For more details on how to use a Secret resource, see [How to use Secret resources to pass the credentials as the environment variables into the properties file](./use-secret-for-credentials.md).
+
+```yaml
+scalardbCluster:
+ secretName: "scalardb-cluster-credentials-secret"
+```
+
+### Affinity configurations (recommended in production environments)
+
+To control pod deployment by using affinity and anti-affinity in Kubernetes, you can use `scalardbCluster.affinity`.
+
+You can configure affinity and anti-affinity by using the same syntax for affinity and anti-affinity in Kubernetes. For more details on configuring affinity in Kubernetes, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/).
+
+```yaml
+scalardbCluster:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/name
+ operator: In
+ values:
+ - scalardb-cluster
+ - key: app.kubernetes.io/app
+ operator: In
+ values:
+ - scalardb-cluster
+ topologyKey: kubernetes.io/hostname
+ weight: 50
+```
+
+### Prometheus and Grafana configurations (recommended in production environments)
+
+To monitor ScalarDB Cluster pods by using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can set `scalardbCluster.grafanaDashboard.enabled`, `scalardbCluster.serviceMonitor.enabled`, and `scalardbCluster.prometheusRule.enabled` to `true`. When you set these configurations to `true`, the chart deploys the necessary resources and kube-prometheus-stack starts monitoring automatically.
+
+```yaml
+scalardbCluster:
+ grafanaDashboard:
+ enabled: true
+ namespace: monitoring
+ serviceMonitor:
+ enabled: true
+ namespace: monitoring
+ interval: 15s
+ prometheusRule:
+ enabled: true
+ namespace: monitoring
+```
+
+### SecurityContext configurations (default value is recommended)
+
+To set SecurityContext and PodSecurityContext for ScalarDB Cluster pods, you can use `scalardbCluster.securityContext` and `scalardbCluster.podSecurityContext`.
+
+You can configure SecurityContext and PodSecurityContext by using the same syntax as SecurityContext and PodSecurityContext in Kubernetes. For more details on the SecurityContext and PodSecurityContext configurations in Kubernetes, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/).
+
+```yaml
+scalardbCluster:
+ podSecurityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+```
+
+### Replica configurations (optional based on your environment)
+
+You can specify the number of ScalarDB Cluster replicas (pods) by using `scalardbCluster.replicaCount`.
+
+```yaml
+scalardbCluster:
+ replicaCount: 3
+```
+
+### Logging configurations (optional based on your environment)
+
+To change the ScalarDB Cluster log level, you can use `scalardbCluster.logLevel`.
+
+```yaml
+scalardbCluster:
+ logLevel: INFO
+```
+
+### GraphQL configurations (optional based on your environment)
+
+To use the GraphQL feature in ScalarDB Cluster, you can set `scalardbCluster.graphql.enabled` to `true` to deploy some resources for the GraphQL feature. Note that you also need to set `scalar.db.graphql.enabled=true` in `scalardbCluster.scalardbClusterNodeProperties` when using the GraphQL feature.
+
+```yaml
+scalardbCluster:
+ graphql:
+ enabled: true
+```
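+
+As noted above, you also need the corresponding property in `scalardbClusterNodeProperties`. A minimal sketch (with the other required properties omitted) might look like the following:
+
+```yaml
+scalardbCluster:
+  scalardbClusterNodeProperties: |
+    # ...the membership and database properties described above...
+    scalar.db.graphql.enabled=true
+```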
+
+Also, you can configure the `Service` resource that accepts GraphQL requests from clients.
+
+```yaml
+scalardbCluster:
+ graphql:
+ service:
+ type: ClusterIP
+ annotations: {}
+ ports:
+ graphql:
+ port: 8080
+ targetPort: 8080
+ protocol: TCP
+```
+
+### SQL configurations (optional based on your environment)
+
+To use the SQL feature in ScalarDB Cluster, no additional configuration is necessary in the custom values file. You can use the feature by setting `scalar.db.sql.enabled=true` in `scalardbCluster.scalardbClusterNodeProperties`.
+
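+For example, a minimal sketch of `scalardbClusterNodeProperties` with the SQL feature enabled (with the other required properties omitted) might look like the following:
+
+```yaml
+scalardbCluster:
+  scalardbClusterNodeProperties: |
+    # ...the membership and database properties described above...
+    scalar.db.sql.enabled=true
+```
+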
+### Scalar Envoy configurations (optional based on your environment)
+
+To use ScalarDB Cluster with `indirect` mode, you must enable Envoy as follows.
+
+```yaml
+envoy:
+ enabled: true
+```
+
+Also, you must set the Scalar Envoy configurations in the custom values file for ScalarDB Cluster. This is because clients need to send requests to ScalarDB Cluster via Scalar Envoy, which acts as the load balancer for gRPC requests, if you deploy ScalarDB Cluster in a Kubernetes environment with `indirect` mode.
+
+For more details on Scalar Envoy configurations, see [Configure a custom values file for Scalar Envoy](configure-custom-values-envoy.md).
+
+```yaml
+envoy:
+ configurationsForScalarEnvoy:
+ ...
+
+scalardbCluster:
+ configurationsForScalarDbCluster:
+ ...
+```
+
+### Taint and toleration configurations (optional based on your environment)
+
+If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `scalardbCluster.tolerations`.
+
+You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+```yaml
+scalardbCluster:
+ tolerations:
+ - effect: NoSchedule
+ key: scalar-labs.com/dedicated-node
+ operator: Equal
+ value: scalardb-cluster
+```
diff --git a/docs/3.12/helm-charts/configure-custom-values-scalardb-graphql.md b/docs/3.12/helm-charts/configure-custom-values-scalardb-graphql.md
new file mode 100644
index 00000000..b001c562
--- /dev/null
+++ b/docs/3.12/helm-charts/configure-custom-values-scalardb-graphql.md
@@ -0,0 +1,219 @@
+# [Deprecated] Configure a custom values file for ScalarDB GraphQL
+
+{% capture notice--info %}
+**Note**
+
+ScalarDB GraphQL Server is now deprecated. Please use [ScalarDB Cluster](./configure-custom-values-scalardb-cluster.md) instead.
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+This document explains how to create your custom values file for the ScalarDB GraphQL chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardb-graphql/README.md) of the ScalarDB GraphQL chart.
+
+## Required configurations
+
+### Ingress configuration
+
+You must set `ingress` so that the Ingress resource listens for client requests. When you deploy multiple GraphQL servers, session affinity is required to handle transactions properly. This is because GraphQL servers keep transactions in memory, so GraphQL queries that continue a transaction must be routed to the same server that started that transaction.
+
+For example, if you use NGINX Ingress Controller, you can set ingress configurations as follows.
+
+```yaml
+ingress:
+ enabled: true
+ className: nginx
+ annotations:
+ nginx.ingress.kubernetes.io/session-cookie-path: /
+ nginx.ingress.kubernetes.io/affinity: cookie
+ nginx.ingress.kubernetes.io/session-cookie-name: INGRESSCOOKIE
+ nginx.ingress.kubernetes.io/session-cookie-hash: sha1
+ nginx.ingress.kubernetes.io/session-cookie-max-age: "300"
+ hosts:
+ - host: ""
+ paths:
+ - path: /graphql
+ pathType: Exact
+```
+
+If you use an AWS Application Load Balancer (ALB), you can set the ingress configurations as follows.
+
+```yaml
+ingress:
+ enabled: true
+ className: alb
+ annotations:
+ alb.ingress.kubernetes.io/scheme: internal
+ alb.ingress.kubernetes.io/target-group-attributes: stickiness.enabled=true,stickiness.lb_cookie.duration_seconds=60
+ alb.ingress.kubernetes.io/target-type: ip
+ alb.ingress.kubernetes.io/healthcheck-path: /graphql?query=%7B__typename%7D
+ hosts:
+ - host: ""
+ paths:
+ - path: /graphql
+ pathType: Exact
+```
+
+### Image configurations
+
+You must set `image.repository`. Be sure to specify the ScalarDB GraphQL container image so that you can pull the image from the container repository.
+
+```yaml
+image:
+ repository:
+```
+
+If you're using AWS or Azure, please refer to the following documents for more details:
+
+* [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md)
+* [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md)
+
+### Database configurations
+
+You must set `scalarDbGraphQlConfiguration`.
+
+If you use ScalarDB Server with ScalarDB GraphQL (recommended), you must set the configuration to access the ScalarDB Server pods.
+
+```yaml
+scalarDbGraphQlConfiguration:
+ contactPoints:
+ contactPort: 60051
+ storage: "grpc"
+ transactionManager: "grpc"
+ namespaces:
+```
+
+## Optional configurations
+
+### Resource configurations (Recommended in the production environment)
+
+If you want to control pod resources using the requests and limits of Kubernetes, you can use `resources`.
+
+Note that under the commercial license, the resources for one pod of Scalar products are limited to 2vCPU / 4GB of memory. Also, if you use the pay-as-you-go containers provided on AWS Marketplace, you cannot run those containers with a `resources.limits` configuration of more than 2vCPU / 4GB of memory. If you exceed this limitation, the pods are automatically stopped.
+
+You can configure them using the same syntax as the requests and limits of Kubernetes. So, please refer to the official document [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details on the requests and limits of Kubernetes.
+
+```yaml
+resources:
+ requests:
+ cpu: 2000m
+ memory: 4Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+### Affinity configurations (Recommended in the production environment)
+
+If you want to control pod deployment using the affinity and anti-affinity of Kubernetes, you can use `affinity`.
+
+You can configure them using the same syntax as the affinity of Kubernetes. So, please refer to the official document [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more details on the affinity configuration of Kubernetes.
+
+```yaml
+affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/app
+ operator: In
+ values:
+ - scalardb-graphql
+ topologyKey: kubernetes.io/hostname
+ weight: 50
+```
+
+### Prometheus/Grafana configurations (Recommended in the production environment)
+
+If you want to monitor ScalarDB GraphQL pods using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can deploy a ConfigMap, a ServiceMonitor, and a PrometheusRule resource for kube-prometheus-stack using `grafanaDashboard.enabled`, `serviceMonitor.enabled`, and `prometheusRule.enabled`.
+
+```yaml
+grafanaDashboard:
+ enabled: true
+ namespace: monitoring
+serviceMonitor:
+ enabled: true
+ namespace: monitoring
+ interval: 15s
+prometheusRule:
+ enabled: true
+ namespace: monitoring
+```
+
+### SecurityContext configurations (Default value is recommended)
+
+If you want to set SecurityContext and PodSecurityContext for ScalarDB GraphQL pods, you can use `securityContext` and `podSecurityContext`.
+
+You can configure them using the same syntax as SecurityContext and PodSecurityContext of Kubernetes. So, please refer to the official document [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for more details on the SecurityContext and PodSecurityContext configurations of Kubernetes.
+
+```yaml
+podSecurityContext:
+ seccompProfile:
+ type: RuntimeDefault
+
+securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+```
+
+### GraphQL Server configurations (Optional based on your environment)
+
+If you want to change the path for running GraphQL queries, you can use `scalarDbGraphQlConfiguration.path`. By default, you can run GraphQL queries using `http://<FQDN or IP address of the ingress>:80/graphql`.
+
+You can also enable/disable [GraphiQL](https://github.com/graphql/graphiql/tree/main/packages/graphiql) using `scalarDbGraphQlConfiguration.graphiql`.
+
+```yaml
+scalarDbGraphQlConfiguration:
+ path: /graphql
+ graphiql: "true"
+```
+
+### TLS configurations (Optional based on your environment)
+
+If you want to use TLS between the client and the ingress, you can use `ingress.tls`.
+
+You must create a Secret resource that includes a secret key and a certificate file. Please refer to the official document [Ingress - TLS](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) for more details on the Secret resource for Ingress.
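+
+For example, you could create such a Secret by using `kubectl` (a sketch; `tls.key` and `tls.crt` are placeholders for your actual key and certificate files):
+
+```console
+kubectl create secret tls graphql-ingress-tls --key tls.key --cert tls.crt
+```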
+
+```yaml
+ingress:
+ tls:
+ - hosts:
+ - foo.example.com
+ - bar.example.com
+ - bax.example.com
+ secretName: graphql-ingress-tls
+```
+
+### Replica configurations (Optional based on your environment)
+
+You can specify the number of replicas (pods) of ScalarDB GraphQL using `replicaCount`.
+
+```yaml
+replicaCount: 3
+```
+
+### Logging configurations (Optional based on your environment)
+
+If you want to change the log level of ScalarDB GraphQL, you can use `scalarDbGraphQlConfiguration.logLevel`.
+
+```yaml
+scalarDbGraphQlConfiguration:
+ logLevel: INFO
+```
+
+### Taint and toleration configurations (Optional based on your environment)
+
+If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `tolerations`.
+
+You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+```yaml
+tolerations:
+ - effect: NoSchedule
+ key: scalar-labs.com/dedicated-node
+ operator: Equal
+ value: scalardb
+```
diff --git a/docs/3.12/helm-charts/configure-custom-values-scalardb.md b/docs/3.12/helm-charts/configure-custom-values-scalardb.md
new file mode 100644
index 00000000..55f2fb48
--- /dev/null
+++ b/docs/3.12/helm-charts/configure-custom-values-scalardb.md
@@ -0,0 +1,196 @@
+# [Deprecated] Configure a custom values file for ScalarDB Server
+
+{% capture notice--info %}
+**Note**
+
+ScalarDB Server is now deprecated. Please use [ScalarDB Cluster](./configure-custom-values-scalardb-cluster.md) instead.
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+This document explains how to create your custom values file for the ScalarDB Server chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardb/README.md) of the ScalarDB Server chart.
+
+## Required configurations
+
+### Scalar Envoy configurations
+
+You must set the Scalar Envoy configurations in the custom values file for ScalarDB Server. This is because client requests are sent to ScalarDB Server via Scalar Envoy as the load balancer of gRPC requests if you deploy ScalarDB Server on a Kubernetes environment.
+
+Please refer to the document [Configure a custom values file for Scalar Envoy](configure-custom-values-envoy.md) for more details on the Scalar Envoy configurations.
+
+```yaml
+envoy:
+ configurationsForScalarEnvoy:
+ ...
+
+scalardb:
+ configurationsForScalarDB:
+ ...
+```
+
+### Image configurations
+
+You must set `scalardb.image.repository`. Be sure to specify the ScalarDB Server container image so that you can pull the image from the container repository.
+
+```yaml
+scalardb:
+ image:
+ repository:
+```
+
+If you're using AWS or Azure, please refer to the following documents for more details:
+
+* [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md)
+* [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md)
+
+### Database configurations
+
+You must set `scalardb.databaseProperties`. Please set your `database.properties` to this parameter. Please refer to the [Configure ScalarDB Server](https://github.com/scalar-labs/scalardb/blob/master/docs/scalardb-server.md#configure-scalardb-server) for more details on the configuration of ScalarDB Server.
+
+```yaml
+scalardb:
+ databaseProperties: |
+ scalar.db.server.port=60051
+ scalar.db.server.prometheus_exporter_port=8080
+ scalar.db.server.grpc.max_inbound_message_size=
+ scalar.db.server.grpc.max_inbound_metadata_size=
+ scalar.db.contact_points=localhost
+ scalar.db.username=cassandra
+ scalar.db.password=cassandra
+ scalar.db.storage=cassandra
+ scalar.db.transaction_manager=consensus-commit
+ scalar.db.consensus_commit.isolation_level=SNAPSHOT
+ scalar.db.consensus_commit.serializable_strategy=
+ scalar.db.consensus_commit.include_metadata.enabled=false
+```
+
+## Optional configurations
+
+### Resource configurations (Recommended in the production environment)
+
+If you want to control pod resources using the requests and limits of Kubernetes, you can use `scalardb.resources`.
+
+Note that under the commercial license, the resources for one pod of Scalar products are limited to 2vCPU / 4GB of memory. Also, if you use the pay-as-you-go containers provided on AWS Marketplace, you cannot run those containers with a `resources.limits` configuration of more than 2vCPU / 4GB of memory. If you exceed this limitation, the pods are automatically stopped.
+
+You can configure them using the same syntax as the requests and limits of Kubernetes. So, please refer to the official document [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details on the requests and limits of Kubernetes.
+
+```yaml
+scalardb:
+ resources:
+ requests:
+ cpu: 2000m
+ memory: 4Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+### Secret configurations (Recommended in the production environment)
+
+If you want to use environment variables to set some properties (e.g., credentials) in the `scalardb.databaseProperties`, you can use `scalardb.secretName` to specify the Secret resource that includes some credentials.
+
+For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) using environment variables, which makes your pods more secure.
+
+Please refer to the document [How to use Secret resources to pass the credentials as the environment variables into the properties file](./use-secret-for-credentials.md) for more details on how to use a Secret resource.
+
+```yaml
+scalardb:
+ secretName: "scalardb-credentials-secret"
+```
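+
+With that Secret in place, the credentials in `scalardb.databaseProperties` can refer to environment variables instead of plaintext values. The following is a sketch only, assuming the `${env:...}` syntax described in the document linked above; the environment variable names are placeholders that must match the keys in your Secret resource:
+
+```yaml
+scalardb:
+  databaseProperties: |
+    ...
+    scalar.db.username=${env:SCALAR_DB_USERNAME}
+    scalar.db.password=${env:SCALAR_DB_PASSWORD}
+```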
+
+### Affinity configurations (Recommended in the production environment)
+
+If you want to control pod deployment using the affinity and anti-affinity of Kubernetes, you can use `scalardb.affinity`.
+
+You can configure them using the same syntax as the affinity of Kubernetes. So, please refer to the official document [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more details on the affinity configuration of Kubernetes.
+
+```yaml
+scalardb:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/name
+ operator: In
+ values:
+ - scalardb
+ - key: app.kubernetes.io/app
+ operator: In
+ values:
+ - scalardb
+ topologyKey: kubernetes.io/hostname
+ weight: 50
+```
+
+### Prometheus/Grafana configurations (Recommended in the production environment)
+
+If you want to monitor ScalarDB Server pods using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can deploy a ConfigMap, a ServiceMonitor, and a PrometheusRule resource for kube-prometheus-stack using `scalardb.grafanaDashboard.enabled`, `scalardb.serviceMonitor.enabled`, and `scalardb.prometheusRule.enabled`.
+
+```yaml
+scalardb:
+ grafanaDashboard:
+ enabled: true
+ namespace: monitoring
+ serviceMonitor:
+ enabled: true
+ namespace: monitoring
+ interval: 15s
+ prometheusRule:
+ enabled: true
+ namespace: monitoring
+```
+
+### SecurityContext configurations (Default value is recommended)
+
+If you want to set SecurityContext and PodSecurityContext for ScalarDB Server pods, you can use `scalardb.securityContext` and `scalardb.podSecurityContext`.
+
+You can configure them using the same syntax as SecurityContext and PodSecurityContext of Kubernetes. So, please refer to the official document [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for more details on the SecurityContext and PodSecurityContext configurations of Kubernetes.
+
+```yaml
+scalardb:
+ podSecurityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+```
+
+### Replica configurations (Optional based on your environment)
+
+You can specify the number of replicas (pods) of ScalarDB Server using `scalardb.replicaCount`.
+
+```yaml
+scalardb:
+ replicaCount: 3
+```
+
+### Logging configurations (Optional based on your environment)
+
+If you want to change the log level of ScalarDB Server, you can use `scalardb.storageConfiguration.dbLogLevel`.
+
+```yaml
+scalardb:
+ storageConfiguration:
+ dbLogLevel: INFO
+```
+
+### Taint and toleration configurations (Optional based on your environment)
+
+If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `scalardb.tolerations`.
+
+You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+```yaml
+scalardb:
+ tolerations:
+ - effect: NoSchedule
+ key: scalar-labs.com/dedicated-node
+ operator: Equal
+ value: scalardb
+```
diff --git a/docs/3.12/helm-charts/configure-custom-values-scalardl-auditor.md b/docs/3.12/helm-charts/configure-custom-values-scalardl-auditor.md
new file mode 100644
index 00000000..39543bc9
--- /dev/null
+++ b/docs/3.12/helm-charts/configure-custom-values-scalardl-auditor.md
@@ -0,0 +1,191 @@
+# Configure a custom values file for ScalarDL Auditor
+
+This document explains how to create your custom values file for the ScalarDL Auditor chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardl-audit/README.md) of the ScalarDL Auditor chart.
+
+## Required configurations
+
+### Scalar Envoy configurations
+
+You must set the Scalar Envoy configurations in the custom values file for ScalarDL Auditor. This is because client requests are sent to ScalarDL Auditor via Scalar Envoy as the load balancer of gRPC requests if you deploy ScalarDL Auditor on a Kubernetes environment.
+
+Please refer to the document [Configure a custom values file for Scalar Envoy](configure-custom-values-envoy.md) for more details on the Scalar Envoy configurations.
+
+```yaml
+envoy:
+ configurationsForScalarEnvoy:
+ ...
+
+auditor:
+ configurationsForScalarDLAuditor:
+ ...
+```
+
+### Image configurations
+
+You must set `auditor.image.repository`. Be sure to specify the ScalarDL Auditor container image so that you can pull the image from the container repository.
+
+```yaml
+auditor:
+ image:
+ repository:
+```
+
+If you're using AWS or Azure, please refer to the following documents for more details:
+
+* [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md)
+* [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md)
+
+### Auditor/Database configurations
+
+You must set `auditor.auditorProperties`. Please set your `auditor.properties` to this parameter. Please refer to the [auditor.properties](https://github.com/scalar-labs/scalar/blob/master/auditor/conf/auditor.properties) for more details on the configuration of ScalarDL Auditor.
+
+```yaml
+auditor:
+ auditorProperties: |
+ scalar.db.contact_points=localhost
+ scalar.db.username=cassandra
+ scalar.db.password=cassandra
+ scalar.db.storage=cassandra
+ scalar.dl.auditor.ledger.host=
+ scalar.dl.auditor.private_key_path=/keys/auditor-key-file
+ scalar.dl.auditor.cert_path=/keys/auditor-cert-file
+```
+
+### Key/Certificate configurations
+
+You must set a private key file to `scalar.dl.auditor.private_key_path` and a certificate file to `scalar.dl.auditor.cert_path`.
+
+You must also mount the private key file and the certificate file on the ScalarDL Auditor pod.
+
+For more details on how to mount the private key file and the certificate file, refer to [Mount key and certificate files on a pod in ScalarDL Helm Charts](./mount-files-or-volumes-on-scalar-pods.md#mount-key-and-certificate-files-on-a-pod-in-scalardl-helm-charts).
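+
+For example, the mount could look like the following (a sketch, assuming the `auditor.extraVolumes` and `auditor.extraVolumeMounts` parameters described in the document above and a pre-created Secret named `auditor-keys`):
+
+```yaml
+auditor:
+  extraVolumes:
+    # Hypothetical Secret that contains auditor-key-file and auditor-cert-file.
+    - name: auditor-keys
+      secret:
+        secretName: auditor-keys
+  extraVolumeMounts:
+    # Mount under /keys to match the paths set in auditorProperties.
+    - name: auditor-keys
+      mountPath: /keys
+      readOnly: true
+```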
+
+## Optional configurations
+
+### Resource configurations (Recommended in the production environment)
+
+If you want to control pod resources using the requests and limits of Kubernetes, you can use `auditor.resources`.
+
+Note that under the commercial license, the resources for one pod of Scalar products are limited to 2vCPU / 4GB of memory. Also, if you use the pay-as-you-go containers provided on AWS Marketplace, you cannot run those containers with a `resources.limits` configuration of more than 2vCPU / 4GB of memory. If you exceed this limitation, the pods are automatically stopped.
+
+You can configure them using the same syntax as the requests and limits of Kubernetes. So, please refer to the official document [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details on the requests and limits of Kubernetes.
+
+```yaml
+auditor:
+ resources:
+ requests:
+ cpu: 2000m
+ memory: 4Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+### Secret configurations
+
+If you want to use environment variables to set some properties (e.g., credentials) in the `auditor.auditorProperties`, you can use `auditor.secretName` to specify the Secret resource that includes some credentials.
+
+For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) using environment variables, which makes your pods more secure.
+
+Please refer to the document [How to use Secret resources to pass the credentials as the environment variables into the properties file](./use-secret-for-credentials.md) for more details on how to use a Secret resource.
+
+```yaml
+auditor:
+ secretName: "auditor-credentials-secret"
+```
+
+### Affinity configurations (Recommended in the production environment)
+
+If you want to control pod deployment using the affinity and anti-affinity of Kubernetes, you can use `auditor.affinity`.
+
+You can configure them using the same syntax as the affinity of Kubernetes. So, please refer to the official document [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more details on the affinity configuration of Kubernetes.
+
+```yaml
+auditor:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/name
+ operator: In
+ values:
+ - scalardl-audit
+ - key: app.kubernetes.io/app
+ operator: In
+ values:
+ - auditor
+ topologyKey: kubernetes.io/hostname
+ weight: 50
+```
+
+### Prometheus/Grafana configurations (Recommended in the production environment)
+
+If you want to monitor ScalarDL Auditor pods using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can deploy a ConfigMap, a ServiceMonitor, and a PrometheusRule resource for kube-prometheus-stack using `auditor.grafanaDashboard.enabled`, `auditor.serviceMonitor.enabled`, and `auditor.prometheusRule.enabled`.
+
+```yaml
+auditor:
+ grafanaDashboard:
+ enabled: true
+ namespace: monitoring
+ serviceMonitor:
+ enabled: true
+ namespace: monitoring
+ interval: 15s
+ prometheusRule:
+ enabled: true
+ namespace: monitoring
+```
+
+### SecurityContext configurations (Default value is recommended)
+
+If you want to set SecurityContext and PodSecurityContext for ScalarDL Auditor pods, you can use `auditor.securityContext` and `auditor.podSecurityContext`.
+
+You can configure them using the same syntax as SecurityContext and PodSecurityContext of Kubernetes. So, please refer to the official document [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for more details on the SecurityContext and PodSecurityContext configurations of Kubernetes.
+
+```yaml
+auditor:
+ podSecurityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+```
+
+### Replica configurations (Optional based on your environment)
+
+You can specify the number of replicas (pods) of ScalarDL Auditor using `auditor.replicaCount`.
+
+```yaml
+auditor:
+ replicaCount: 3
+```
+
+### Logging configurations (Optional based on your environment)
+
+If you want to change the log level of ScalarDL Auditor, you can use `auditor.scalarAuditorConfiguration.auditorLogLevel`.
+
+```yaml
+auditor:
+ scalarAuditorConfiguration:
+ auditorLogLevel: INFO
+```
+
+### Taint and toleration configurations (Optional based on your environment)
+
+If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `auditor.tolerations`.
+
+You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+```yaml
+auditor:
+ tolerations:
+ - effect: NoSchedule
+ key: scalar-labs.com/dedicated-node
+ operator: Equal
+ value: scalardl-auditor
+```
diff --git a/docs/3.12/helm-charts/configure-custom-values-scalardl-ledger.md b/docs/3.12/helm-charts/configure-custom-values-scalardl-ledger.md
new file mode 100644
index 00000000..3b06fd50
--- /dev/null
+++ b/docs/3.12/helm-charts/configure-custom-values-scalardl-ledger.md
@@ -0,0 +1,191 @@
+# Configure a custom values file for ScalarDL Ledger
+
+This document explains how to create your custom values file for the ScalarDL Ledger chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardl/README.md) of the ScalarDL Ledger chart.
+
+## Required configurations
+
+### Scalar Envoy configurations
+
+You must set the Scalar Envoy configurations in the custom values file for ScalarDL Ledger. This is because client requests are sent to ScalarDL Ledger via Scalar Envoy as the load balancer of gRPC requests if you deploy ScalarDL Ledger on a Kubernetes environment.
+
+Please refer to the document [Configure a custom values file for Scalar Envoy](configure-custom-values-envoy.md) for more details on the Scalar Envoy configurations.
+
+```yaml
+envoy:
+ configurationsForScalarEnvoy:
+ ...
+
+ledger:
+ configurationsForScalarDLLedger:
+ ...
+```
+
+### Image configurations
+
+You must set `ledger.image.repository`. Be sure to specify the ScalarDL Ledger container image so that you can pull the image from the container repository.
+
+```yaml
+ledger:
+ image:
+ repository:
+```
+
+If you're using AWS or Azure, please refer to the following documents for more details:
+
+* [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md)
+* [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md)
+
+### Ledger/Database configurations
+
+You must set `ledger.ledgerProperties`. Please set your `ledger.properties` to this parameter. Please refer to the [ledger.properties](https://github.com/scalar-labs/scalar/blob/master/ledger/conf/ledger.properties) for more details on the configuration of ScalarDL Ledger.
+
+```yaml
+ledger:
+ ledgerProperties: |
+ scalar.db.contact_points=localhost
+ scalar.db.username=cassandra
+ scalar.db.password=cassandra
+ scalar.db.storage=cassandra
+ scalar.dl.ledger.proof.enabled=true
+ scalar.dl.ledger.auditor.enabled=true
+ scalar.dl.ledger.proof.private_key_path=/keys/ledger-key-file
+```
+
+### Key/Certificate configurations
+
+If you set `scalar.dl.ledger.proof.enabled` to `true` (this configuration is required if you use ScalarDL Auditor), you must set a private key file to `scalar.dl.ledger.proof.private_key_path`.
+
+In this case, you must mount the private key file on the ScalarDL Ledger pod.
+
+For more details on how to mount the private key file, refer to [Mount key and certificate files on a pod in ScalarDL Helm Charts](./mount-files-or-volumes-on-scalar-pods.md#mount-key-and-certificate-files-on-a-pod-in-scalardl-helm-charts).
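+
+For example, the mount could look like the following (a sketch, assuming the `ledger.extraVolumes` and `ledger.extraVolumeMounts` parameters described in the document above and a pre-created Secret named `ledger-keys`):
+
+```yaml
+ledger:
+  extraVolumes:
+    # Hypothetical Secret that contains ledger-key-file.
+    - name: ledger-keys
+      secret:
+        secretName: ledger-keys
+  extraVolumeMounts:
+    # Mount under /keys to match the path set in ledgerProperties.
+    - name: ledger-keys
+      mountPath: /keys
+      readOnly: true
+```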
+
+## Optional configurations
+
+### Resource configurations (Recommended in the production environment)
+
+If you want to control pod resources using the requests and limits of Kubernetes, you can use `ledger.resources`.
+
+Note that under the commercial license, the resources for one pod of Scalar products are limited to 2vCPU / 4GB of memory. Also, if you use the pay-as-you-go containers provided on AWS Marketplace, you cannot run those containers with a `resources.limits` configuration of more than 2vCPU / 4GB of memory. If you exceed this limitation, the pods are automatically stopped.
+
+You can configure them using the same syntax as the requests and limits of Kubernetes. So, please refer to the official document [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details on the requests and limits of Kubernetes.
+
+```yaml
+ledger:
+ resources:
+ requests:
+ cpu: 2000m
+ memory: 4Gi
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+```
+
+### Secret configurations (Recommended in the production environment)
+
+If you want to use environment variables to set some properties (e.g., credentials) in the `ledger.ledgerProperties`, you can use `ledger.secretName` to specify the Secret resource that includes some credentials.
+
+For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) using environment variables, which makes your pods more secure.
+
+Please refer to the document [How to use Secret resources to pass the credentials as the environment variables into the properties file](./use-secret-for-credentials.md) for more details on how to use a Secret resource.
+
+```yaml
+ledger:
+ secretName: "ledger-credentials-secret"
+```
+
+### Affinity configurations (Recommended in the production environment)
+
+If you want to control pod deployment using the affinity and anti-affinity of Kubernetes, you can use `ledger.affinity`.
+
+You can configure them using the same syntax as the affinity of Kubernetes. So, please refer to the official document [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more details on the affinity configuration of Kubernetes.
+
+```yaml
+ledger:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/name
+ operator: In
+ values:
+ - scalardl
+ - key: app.kubernetes.io/app
+ operator: In
+ values:
+ - ledger
+ topologyKey: kubernetes.io/hostname
+ weight: 50
+```
+
+### Prometheus/Grafana configurations (Recommended in the production environment)
+
+If you want to monitor ScalarDL Ledger pods using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can deploy a ConfigMap, a ServiceMonitor, and a PrometheusRule resource for kube-prometheus-stack using `ledger.grafanaDashboard.enabled`, `ledger.serviceMonitor.enabled`, and `ledger.prometheusRule.enabled`.
+
+```yaml
+ledger:
+ grafanaDashboard:
+ enabled: true
+ namespace: monitoring
+ serviceMonitor:
+ enabled: true
+ namespace: monitoring
+ interval: 15s
+ prometheusRule:
+ enabled: true
+ namespace: monitoring
+```
+
+### SecurityContext configurations (Default value is recommended)
+
+If you want to set SecurityContext and PodSecurityContext for ScalarDL Ledger pods, you can use `ledger.securityContext` and `ledger.podSecurityContext`.
+
+You can configure them using the same syntax as SecurityContext and PodSecurityContext of Kubernetes. So, please refer to the official document [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for more details on the SecurityContext and PodSecurityContext configurations of Kubernetes.
+
+```yaml
+ledger:
+ podSecurityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+```
+
+### Replica configurations (Optional based on your environment)
+
+You can specify the number of replicas (pods) of ScalarDL Ledger using `ledger.replicaCount`.
+
+```yaml
+ledger:
+ replicaCount: 3
+```
+
+### Logging configurations (Optional based on your environment)
+
+If you want to change the log level of ScalarDL Ledger, you can use `ledger.scalarLedgerConfiguration.ledgerLogLevel`.
+
+```yaml
+ledger:
+ scalarLedgerConfiguration:
+ ledgerLogLevel: INFO
+```
+
+### Taint and toleration configurations (Optional based on your environment)
+
+If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `ledger.tolerations`.
+
+You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+```yaml
+ledger:
+ tolerations:
+ - effect: NoSchedule
+ key: scalar-labs.com/dedicated-node
+ operator: Equal
+ value: scalardl-ledger
+```
diff --git a/docs/3.12/helm-charts/configure-custom-values-scalardl-schema-loader.md b/docs/3.12/helm-charts/configure-custom-values-scalardl-schema-loader.md
new file mode 100644
index 00000000..655448d7
--- /dev/null
+++ b/docs/3.12/helm-charts/configure-custom-values-scalardl-schema-loader.md
@@ -0,0 +1,89 @@
+# Configure a custom values file for ScalarDL Schema Loader
+
+This document explains how to create your custom values file for the ScalarDL Schema Loader chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/schema-loading/README.md) of the ScalarDL Schema Loader chart.
+
+## Required configurations
+
+### Image configurations
+
+You must set `schemaLoading.image.repository`. Be sure to specify the ScalarDL Schema Loader container image so that you can pull the image from the container repository.
+
+```yaml
+schemaLoading:
+ image:
+ repository:
+```
+
+If you're using AWS or Azure, please refer to the following documents for more details:
+
+* [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md)
+* [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md)
+
+### Database configurations
+
+You must set `schemaLoading.databaseProperties`. Please set the `database.properties` used to access the backend database to this parameter. Please refer to the [Getting Started with ScalarDB](https://github.com/scalar-labs/scalardb/blob/master/docs/getting-started-with-scalardb.md) for more details on the database configuration of ScalarDB.
+
+```yaml
+schemaLoading:
+ databaseProperties: |
+ scalar.db.contact_points=cassandra
+ scalar.db.contact_port=9042
+ scalar.db.username=cassandra
+ scalar.db.password=cassandra
+ scalar.db.storage=cassandra
+```
+
+### Schema type configurations
+
+You must set `schemaLoading.schemaType`.
+
+If you create the schema of ScalarDL Ledger, please set `ledger`.
+
+```yaml
+schemaLoading:
+ schemaType: ledger
+```
+
+If you create the schema of ScalarDL Auditor, please set `auditor`.
+
+```yaml
+schemaLoading:
+ schemaType: auditor
+```
+
+## Optional configurations
+
+### Secret configurations (Recommended in the production environment)
+
+If you want to use environment variables to set some properties (e.g., credentials) in the `schemaLoading.databaseProperties`, you can use `schemaLoading.secretName` to specify the Secret resource that includes some credentials.
+
+For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) using environment variables, which makes your pods more secure.
+
+Please refer to the document [How to use Secret resources to pass the credentials as the environment variables into the properties file](./use-secret-for-credentials.md) for more details on how to use a Secret resource.
+
+```yaml
+schemaLoading:
+ secretName: "schema-loader-credentials-secret"
+```
+
+### Flags configurations (Optional based on your environment)
+
+You can specify several flags as an array. Please refer to the document [ScalarDB Schema Loader](https://github.com/scalar-labs/scalardb/blob/master/docs/schema-loader.md) for more details on the flags.
+
+```yaml
+schemaLoading:
+ commandArgs:
+ - "--alter"
+ - "--compaction-strategy"
+ - ""
+ - "--delete-all"
+ - "--no-backup"
+ - "--no-scaling"
+ - "--repair-all"
+ - "--replication-factor"
+ - ""
+ - "--replication-strategy"
+ - ""
+ - "--ru"
+ - ""
+```
diff --git a/docs/3.12/helm-charts/getting-started-logging.md b/docs/3.12/helm-charts/getting-started-logging.md
new file mode 100644
index 00000000..5f5aa2f4
--- /dev/null
+++ b/docs/3.12/helm-charts/getting-started-logging.md
@@ -0,0 +1,96 @@
+# Getting Started with Helm Charts (Logging using Loki Stack)
+
+This document explains how to get started with log aggregation for Scalar products on Kubernetes using Grafana Loki (with Promtail).
+
+We assume that you have already read [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](./getting-started-monitoring.md) for Scalar products and installed kube-prometheus-stack.
+
+## What we create
+
+We will deploy the following components on a Kubernetes cluster:
+
+```
++--------------------------------------------------------------------------------------------------+
+| +------------------------------------+ |
+| | loki-stack | |
+| | | +-----------------+ |
+| | +--------------+ +--------------+ | <-----------------(Log)-------------- | Scalar Products | |
+| | | Loki | | Promtail | | | | |
+| | +--------------+ +--------------+ | | +-----------+ | |
+| +------------------------------------+ | | ScalarDB | | |
+| | +-----------+ | |
+| +------------------------------------------------------+ | | |
+| | kube-prometheus-stack | | +-----------+ | |
+| | | | | ScalarDL | | |
+| | +--------------+ +--------------+ +--------------+ | -----(Monitor)----> | +-----------+ | |
+| | | Prometheus | | Alertmanager | | Grafana | | +-----------------+ |
+| | +-------+------+ +------+-------+ +------+-------+ | |
+| | | | | | |
+| | +----------------+-----------------+ | |
+| | | | |
+| +--------------------------+---------------------------+ |
+| | |
+| | Kubernetes |
++----------------------------+---------------------------------------------------------------------+
+ | <- expose to localhost (127.0.0.1) or use load balancer etc to access
+ |
+ (Access Dashboard through HTTP)
+ |
+ +----+----+
+ | Browser |
+ +---------+
+```
+
+## Step 1. Prepare a custom values file
+
+1. Get the sample file [scalar-loki-stack-custom-values.yaml](./conf/scalar-loki-stack-custom-values.yaml) for the `loki-stack` helm chart.
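+
+   For reference, a minimal custom values file for `loki-stack` might look like the following. This is a sketch only; the sample file linked above is the recommended starting point, and `loki.enabled` and `promtail.enabled` are standard parameters of the `loki-stack` chart:
+
+   ```yaml
+   loki:
+     enabled: true
+
+   promtail:
+     enabled: true
+   ```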
+
+## Step 2. Deploy `loki-stack`
+
+1. Add the `grafana` helm repository.
+
+ ```console
+ helm repo add grafana https://grafana.github.io/helm-charts
+ ```
+
+1. Deploy the `loki-stack` helm chart.
+
+ ```console
+ helm install scalar-logging-loki grafana/loki-stack -n monitoring -f scalar-loki-stack-custom-values.yaml
+ ```
+
+## Step 3. Add a Loki data source in the Grafana configuration
+
+1. Add a configuration of the Loki data source in the `scalar-prometheus-custom-values.yaml` file.
+
+ ```yaml
+ grafana:
+ additionalDataSources:
+ - name: Loki
+ type: loki
+ uid: loki
+ url: http://scalar-logging-loki:3100/
+ access: proxy
+ editable: false
+ isDefault: false
+ ```
+
+1. Apply the configuration (upgrade the deployment of `kube-prometheus-stack`).
+
+ ```console
+ helm upgrade scalar-monitoring prometheus-community/kube-prometheus-stack -n monitoring -f scalar-prometheus-custom-values.yaml
+ ```
+
+## Step 4. Access the Grafana dashboard
+
+1. Check the collected logs in Grafana
+   - Go to Grafana at http://localhost:3000 (if you use minikube)
+   - Go to `Explore` and select the added Loki data source
+   - You can see the collected logs on the `Explore` page
+
+## Step 5. Delete the `loki-stack` helm chart
+
+1. Uninstall `loki-stack`.
+
+ ```console
+ helm uninstall scalar-logging-loki -n monitoring
+ ```
diff --git a/docs/3.12/helm-charts/getting-started-monitoring.md b/docs/3.12/helm-charts/getting-started-monitoring.md
new file mode 100644
index 00000000..9c4993a4
--- /dev/null
+++ b/docs/3.12/helm-charts/getting-started-monitoring.md
@@ -0,0 +1,256 @@
+# Getting Started with Helm Charts (Monitoring using Prometheus Operator)
+
+This document explains how to get started with monitoring Scalar products on Kubernetes using Prometheus Operator (kube-prometheus-stack). Here, we assume that you already have a Mac or Linux environment for testing. We use **Minikube** in this document, but the steps we will show should work in any Kubernetes cluster.
+
+## What we create
+
+We will deploy the following components on a Kubernetes cluster:
+
+```
++--------------------------------------------------------------------------------------------------+
+| +------------------------------------------------------+ +-----------------+ |
+| | kube-prometheus-stack | | Scalar Products | |
+| | | | | |
+| | +--------------+ +--------------+ +--------------+ | -----(Monitor)----> | +-----------+ | |
+| | | Prometheus | | Alertmanager | | Grafana | | | | ScalarDB | | |
+| | +-------+------+ +------+-------+ +------+-------+ | | +-----------+ | |
+| | | | | | | +-----------+ | |
+| | +----------------+-----------------+ | | | ScalarDL | | |
+| | | | | +-----------+ | |
+| +--------------------------+---------------------------+ +-----------------+ |
+| | |
+| | Kubernetes |
++----------------------------+---------------------------------------------------------------------+
+ | <- expose to localhost (127.0.0.1) or use load balancer etc to access
+ |
+ (Access Dashboard through HTTP)
+ |
+ +----+----+
+ | Browser |
+ +---------+
+```
+
+## Step 1. Start a Kubernetes cluster
+
+First, you need to prepare a Kubernetes cluster. If you use a **minikube** environment, please refer to the [Getting Started with Scalar Helm Charts](./getting-started-scalar-helm-charts.md). If you have already started a Kubernetes cluster, you can skip this step.
+
+## Step 2. Prepare a custom values file
+
+1. Save the sample file [scalar-prometheus-custom-values.yaml](./conf/scalar-prometheus-custom-values.yaml) for `kube-prometheus-stack`.
+
+1. Add custom values in the `scalar-prometheus-custom-values.yaml` as follows.
+ * settings
+ * `prometheus.service.type` to `LoadBalancer`
+ * `alertmanager.service.type` to `LoadBalancer`
+ * `grafana.service.type` to `LoadBalancer`
+ * `grafana.service.port` to `3000`
+ * Example
+ ```yaml
+ alertmanager:
+
+ service:
+ type: LoadBalancer
+
+ ...
+
+ grafana:
+
+ service:
+ type: LoadBalancer
+ port: 3000
+
+ ...
+
+ prometheus:
+
+ service:
+ type: LoadBalancer
+
+ ...
+ ```
+   * Note:
+     * If you want to customize the Prometheus Operator deployment by using Helm Charts, you'll need to set the following configurations to monitor Scalar products:
+       * Set `serviceMonitorSelectorNilUsesHelmValues` and `ruleSelectorNilUsesHelmValues` to `false` (`true` by default) so that Prometheus Operator can detect the `ServiceMonitor` and `PrometheusRule` resources for Scalar products. (See the example after this list.)
+     * If you want to use Scalar Manager, you'll need to set the following configurations so that Scalar Manager can collect CPU and memory resources:
+       * Set `kubeStateMetrics.enabled`, `nodeExporter.enabled`, and `kubelet.enabled` to `true`. (See the example after this list.)
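+   * Example: the settings from the notes above would look like the following in the custom values file (a sketch; all of these are standard `kube-prometheus-stack` chart parameters):
+     ```yaml
+     prometheus:
+       prometheusSpec:
+         serviceMonitorSelectorNilUsesHelmValues: false
+         ruleSelectorNilUsesHelmValues: false
+
+     kubeStateMetrics:
+       enabled: true
+
+     nodeExporter:
+       enabled: true
+
+     kubelet:
+       enabled: true
+     ```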
+
+## Step 3. Deploy `kube-prometheus-stack`
+
+1. Add the `prometheus-community` helm repository.
+ ```console
+ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+ ```
+
+1. Create a namespace `monitoring` in the Kubernetes cluster.
+ ```console
+ kubectl create namespace monitoring
+ ```
+
+1. Deploy the `kube-prometheus-stack`.
+ ```console
+ helm install scalar-monitoring prometheus-community/kube-prometheus-stack -n monitoring -f scalar-prometheus-custom-values.yaml
+ ```
+
+## Step 4. Deploy (or Upgrade) Scalar products using Helm Charts
+
+* Note:
+ * The following explains the minimum steps. If you want to know more details about the deployment of ScalarDB and ScalarDL, please refer to the following documents.
+ * [Getting Started with Helm Charts (ScalarDB Server)](./getting-started-scalardb.md)
+ * [Getting Started with Helm Charts (ScalarDL Ledger / Ledger only)](./getting-started-scalardl-ledger.md)
+ * [Getting Started with Helm Charts (ScalarDL Ledger and Auditor / Auditor mode)](./getting-started-scalardl-auditor.md)
+
+1. To enable Prometheus monitoring of Scalar products, set the following configurations in the custom values file to `true`.
+ * Configurations
+ * `*.prometheusRule.enabled`
+ * `*.grafanaDashboard.enabled`
+ * `*.serviceMonitor.enabled`
+ * Sample configuration files
+ * ScalarDB (scalardb-custom-values.yaml)
+ ```yaml
+ envoy:
+ prometheusRule:
+ enabled: true
+ grafanaDashboard:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+
+ scalardb:
+ prometheusRule:
+ enabled: true
+ grafanaDashboard:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+ ```
+ * ScalarDL Ledger (scalardl-ledger-custom-values.yaml)
+ ```yaml
+ envoy:
+ prometheusRule:
+ enabled: true
+ grafanaDashboard:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+
+ ledger:
+ prometheusRule:
+ enabled: true
+ grafanaDashboard:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+ ```
+ * ScalarDL Auditor (scalardl-auditor-custom-values.yaml)
+ ```yaml
+ envoy:
+ prometheusRule:
+ enabled: true
+ grafanaDashboard:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+
+ auditor:
+ prometheusRule:
+ enabled: true
+ grafanaDashboard:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+ ```
+
+1. Deploy (or Upgrade) Scalar products using Helm Charts with the above custom values file.
+ * Examples
+ * ScalarDB
+ ```console
+ helm install scalardb scalar-labs/scalardb -f ./scalardb-custom-values.yaml
+ ```
+ ```console
+ helm upgrade scalardb scalar-labs/scalardb -f ./scalardb-custom-values.yaml
+ ```
+ * ScalarDL Ledger
+ ```console
+ helm install scalardl-ledger scalar-labs/scalardl -f ./scalardl-ledger-custom-values.yaml
+ ```
+ ```console
+ helm upgrade scalardl-ledger scalar-labs/scalardl -f ./scalardl-ledger-custom-values.yaml
+ ```
+ * ScalarDL Auditor
+ ```console
+ helm install scalardl-auditor scalar-labs/scalardl-audit -f ./scalardl-auditor-custom-values.yaml
+ ```
+ ```console
+ helm upgrade scalardl-auditor scalar-labs/scalardl-audit -f ./scalardl-auditor-custom-values.yaml
+ ```
+
+## Step 5. Access Dashboards
+
+### If you use minikube
+
+1. To expose each service resource as your `localhost (127.0.0.1)`, open another terminal, and run the `minikube tunnel` command.
+ ```console
+ minikube tunnel
+ ```
+
+ After running the `minikube tunnel` command, you can see the EXTERNAL-IP of each service resource as `127.0.0.1`.
+ ```console
+ kubectl get svc -n monitoring scalar-monitoring-kube-pro-prometheus scalar-monitoring-kube-pro-alertmanager scalar-monitoring-grafana
+ ```
+ [Command execution result]
+ ```console
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ scalar-monitoring-kube-pro-prometheus LoadBalancer 10.98.11.12 127.0.0.1 9090:30550/TCP 26m
+ scalar-monitoring-kube-pro-alertmanager LoadBalancer 10.98.151.66 127.0.0.1 9093:31684/TCP 26m
+ scalar-monitoring-grafana LoadBalancer 10.103.19.4 127.0.0.1 3000:31948/TCP 26m
+ ```
+
+1. Access each Dashboard.
+ * Prometheus
+ ```console
+ http://localhost:9090/
+ ```
+ * Alertmanager
+ ```console
+ http://localhost:9093/
+ ```
+ * Grafana
+ ```console
+ http://localhost:3000/
+ ```
+ * Note:
+ * You can see the user and password of Grafana as follows.
+ * user
+ ```console
+ kubectl get secrets scalar-monitoring-grafana -n monitoring -o jsonpath='{.data.admin-user}' | base64 -d
+ ```
+ * password
+ ```console
+ kubectl get secrets scalar-monitoring-grafana -n monitoring -o jsonpath='{.data.admin-password}' | base64 -d
+ ```
+
+### If you use a Kubernetes cluster other than minikube
+
+If you use a Kubernetes cluster other than minikube, you need to access the LoadBalancer service according to each Kubernetes cluster's method, for example, by using a load balancer provided by your cloud service or the `kubectl port-forward` command.
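+
+For example, with the `kubectl port-forward` command, you can forward the Grafana service to your local machine as follows (the service name is the one created in Step 3):
+
+```console
+kubectl port-forward -n monitoring svc/scalar-monitoring-grafana 3000:3000
+```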
+
+## Step 6. Delete all resources
+
+After completing the Monitoring tests on the Kubernetes cluster, remove all resources.
+
+1. Terminate the `minikube tunnel` command. (If you use minikube)
+ ```console
+ Ctrl + C
+ ```
+
+1. Uninstall `kube-prometheus-stack`.
+ ```console
+ helm uninstall scalar-monitoring -n monitoring
+ ```
+
+1. Delete minikube. (Optional / If you use minikube)
+ ```console
+ minikube delete --all
+ ```
+ * Note:
+     * If you deployed ScalarDB or ScalarDL, you need to remove them before deleting minikube.
diff --git a/docs/3.12/helm-charts/getting-started-scalar-helm-charts.md b/docs/3.12/helm-charts/getting-started-scalar-helm-charts.md
new file mode 100644
index 00000000..4e1005e0
--- /dev/null
+++ b/docs/3.12/helm-charts/getting-started-scalar-helm-charts.md
@@ -0,0 +1,62 @@
+# Getting Started with Scalar Helm Charts
+
+This document explains how to get started with Scalar Helm Charts on a Kubernetes cluster as a test environment. Here, we assume that you already have a Mac or Linux environment for testing. We use **Minikube** in this document, but the steps we will show should work in any Kubernetes cluster.
+
+## Tools
+
+We will use the following tools for testing.
+
+1. minikube (If you use other Kubernetes distributions, minikube is not necessary.)
+1. kubectl
+1. Helm
+1. cfssl / cfssljson
+
+## Step 1. Install tools
+
+First, you need to install the following tools used in this guide.
+
+1. Install minikube according to the [minikube documentation](https://minikube.sigs.k8s.io/docs/start/)
+
+1. Install kubectl according to the [Kubernetes documentation](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/)
+
+1. Install the helm command according to the [Helm documentation](https://helm.sh/docs/intro/install/)
+
+1. Install cfssl and cfssljson according to the [CFSSL documentation](https://github.com/cloudflare/cfssl)
+   * Note:
+     * You need cfssl and cfssljson only when you try ScalarDL. If you try Scalar Helm Charts other than ScalarDL (e.g., ScalarDB, Monitoring, Logging, etc.), cfssl and cfssljson are not necessary.
+
+## Step 2. Start minikube with docker driver (Optional / If you use minikube)
+
+1. Start minikube.
+ ```console
+ minikube start
+ ```
+
+1. Check the status of the minikube and pods.
+ ```console
+ kubectl get pod -A
+ ```
+ [Command execution result]
+ ```console
+ NAMESPACE NAME READY STATUS RESTARTS AGE
+ kube-system coredns-64897985d-lbsfr 1/1 Running 1 (20h ago) 21h
+ kube-system etcd-minikube 1/1 Running 1 (20h ago) 21h
+ kube-system kube-apiserver-minikube 1/1 Running 1 (20h ago) 21h
+ kube-system kube-controller-manager-minikube 1/1 Running 1 (20h ago) 21h
+ kube-system kube-proxy-gsl6j 1/1 Running 1 (20h ago) 21h
+ kube-system kube-scheduler-minikube 1/1 Running 1 (20h ago) 21h
+ kube-system storage-provisioner 1/1 Running 2 (19s ago) 21h
+ ```
+ If the minikube starts properly, you can see some pods are **Running** in the kube-system namespace.
+
+## Step 3. Try Scalar Helm Charts
+
+After the Kubernetes cluster starts, you can try each of the Scalar Helm Charts on it. Please refer to the following documents for more details.
+
+* [ScalarDB Analytics with PostgreSQL](./getting-started-scalardb-analytics-postgresql.md)
+* [ScalarDL Ledger (Ledger only)](./getting-started-scalardl-ledger.md)
+* [ScalarDL Ledger and Auditor (Auditor mode)](./getting-started-scalardl-auditor.md)
+* [Monitoring using Prometheus Operator](./getting-started-monitoring.md)
+ * [Logging using Loki Stack](./getting-started-logging.md)
+ * [Scalar Manager](./getting-started-scalar-manager.md)
+* [[Deprecated] ScalarDB Server](./getting-started-scalardb.md)
diff --git a/docs/3.12/helm-charts/getting-started-scalar-manager.md b/docs/3.12/helm-charts/getting-started-scalar-manager.md
new file mode 100644
index 00000000..1864bab3
--- /dev/null
+++ b/docs/3.12/helm-charts/getting-started-scalar-manager.md
@@ -0,0 +1,154 @@
+# Getting Started with Helm Charts (Scalar Manager)
+
+Scalar Manager is a web-based dashboard that allows users to:
+* check the health of the Scalar products
+* pause and unpause the Scalar products to back up or restore underlying databases
+* check the metrics and logs of the Scalar products through embedded Grafana dashboards
+
+## Assumption
+
+This guide assumes that you already know how to deploy Scalar products with the monitoring and logging tools to a Kubernetes cluster.
+If not, please start with [Getting Started with Scalar Helm Charts](./getting-started-scalar-helm-charts.md) before reading this guide.
+
+## Requirement
+
+* You need privileges to pull the Scalar Manager container (`scalar-manager`) from [GitHub Packages](https://github.com/orgs/scalar-labs/packages).
+* You must create a GitHub personal access token (PAT) with the `read:packages` scope according to the [GitHub documentation](https://docs.github.com/en/github/authenticating-to-github/keeping-your-account-and-data-secure/creating-a-personal-access-token) so that you can pull the above container.
+
+## What we create
+
+We will deploy the following components on a Kubernetes cluster:
+
+```
++--------------------------------------------------------------------------------------------------+
+| +----------------------+ |
+| | scalar-manager | |
+| | | |
+| | +------------------+ | --------------------------(Manage)--------------------------+ |
+| | | Scalar Manager | | | |
+| | +------------------+ | | |
+| +--+-------------------+ | |
+| | | |
+| +------------------------------------+ | |
+| | loki-stack | V |
+| | | +-----------------+ |
+| | +--------------+ +--------------+ | <----------------(Log)--------------- | Scalar Products | |
+| | | Loki | | Promtail | | | | |
+| | +--------------+ +--------------+ | | +-----------+ | |
+| +------------------------------------+ | | ScalarDB | | |
+| | | +-----------+ | |
+| +------------------------------------------------------+ | | |
+| | kube-prometheus-stack | | +-----------+ | |
+| | | | | ScalarDL | | |
+| | +--------------+ +--------------+ +--------------+ | -----(Monitor)----> | +-----------+ | |
+| | | Prometheus | | Alertmanager | | Grafana | | +-----------------+ |
+| | +-------+------+ +------+-------+ +------+-------+ | |
+| | | | | | |
+| | +----------------+-----------------+ | |
+| | | | |
+| +--------------------------+---------------------------+ |
+| | | |
+| | | Kubernetes |
++----+-----------------------+---------------------------------------------------------------------+
+ | |
+ expose to localhost (127.0.0.1) or use load balancer etc to access
+ | |
+ (Access Dashboard through HTTP)
+ | |
++----+----+ +----+----+
+| Browser | <-(Embed)-- + Browser |
++---------+ +---------+
+```
+
+## Step 1. Upgrade the `kube-prometheus-stack` to allow Grafana to be embedded
+
+1. Add or revise the following values in the custom values file (e.g., `scalar-prometheus-custom-values.yaml`) for `kube-prometheus-stack`
+
+ ```yaml
+ grafana:
+ grafana.ini:
+ security:
+ allow_embedding: true
+ cookie_samesite: disabled
+ ```
+
+1. Upgrade the Helm installation
+
+ ```console
+ helm upgrade scalar-monitoring prometheus-community/kube-prometheus-stack -n monitoring -f scalar-prometheus-custom-values.yaml
+ ```
+
+## Step 2. Prepare a custom values file for Scalar Manager
+
+1. Get the sample file [scalar-manager-custom-values.yaml](./conf/scalar-manager-custom-values.yaml) for `scalar-manager`.
+
+1. Add the targets that you would like to manage. For example, if you want to manage a Ledger cluster, you can add the values as follows.
+
+ ```yaml
+ scalarManager:
+ targets:
+ - name: my-ledgers-cluster
+ adminSrv: _scalardl-admin._tcp.scalardl-headless.default.svc.cluster.local
+ databaseType: cassandra
+ ```
+
+   Note: `adminSrv` is the DNS Service URL that returns the SRV record of the pods. Kubernetes creates this URL for the named port of the headless service of the Scalar product. The format is `_{port name}._{protocol}.{service name}.{namespace}.svc.{cluster domain name}`
+
+1. Set the Grafana URL. For example, if the Grafana of `kube-prometheus-stack` is exposed at `localhost:3000`, you can set it as follows.
+
+ ```yaml
+ scalarManager:
+ grafanaUrl: "http://localhost:3000"
+ ```
+
+1. Set the refresh interval at which Scalar Manager checks the status of the products. The default value is `30` seconds, but you can change it as follows:
+
+ ```yaml
+ scalarManager:
+ refreshInterval: 60 # one minute
+ ```
+
+1. Set the service type for access to Scalar Manager. The default value is `ClusterIP`, but if you access Scalar Manager by using the `minikube tunnel` command or some load balancer, you can set it to `LoadBalancer`.
+
+ ```yaml
+ service:
+ type: LoadBalancer
+ ```
+
+## Step 3. Deploy `scalar-manager`
+
+1. Create a secret resource `reg-docker-secrets` to pull the Scalar Manager container image from GitHub Packages.
+
+ ```console
+   kubectl create secret docker-registry reg-docker-secrets --docker-server=ghcr.io --docker-username=<YOUR-GITHUB-USERNAME> --docker-password=<YOUR-PERSONAL-ACCESS-TOKEN>
+ ```
+
+1. Deploy the `scalar-manager` Helm Chart.
+
+ ```console
+ helm install scalar-manager scalar-labs/scalar-manager -f scalar-manager-custom-values.yaml
+ ```
+
+## Step 4. Access Scalar Manager
+
+### If you use minikube
+
+1. To expose Scalar Manager's service resource as your `localhost (127.0.0.1)`, open another terminal, and run the `minikube tunnel` command.
+
+ ```console
+ minikube tunnel
+ ```
+
+1. Open the browser with URL `http://localhost:8000`
+
+### If you use other Kubernetes than minikube
+
+If you use a Kubernetes cluster other than minikube, you need to access the LoadBalancer service in the manner appropriate to your Kubernetes cluster, for example, by using a load balancer provided by your cloud service or the `kubectl port-forward` command.
+
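+The following is a sketch of the `kubectl port-forward` approach. The service name and ports here are assumptions for illustration; check the actual name and port with `kubectl get svc` and adjust accordingly.
+
+```console
+# Hypothetical example: forward local port 8000 to the Scalar Manager service
+kubectl port-forward svc/scalar-manager 8000:8000
+```
+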
+## Step 5. Delete Scalar Manager
+
+1. Uninstall `scalar-manager`.
+
+ ```console
+ helm uninstall scalar-manager
+ ```
diff --git a/docs/3.12/helm-charts/getting-started-scalardb-analytics-postgresql.md b/docs/3.12/helm-charts/getting-started-scalardb-analytics-postgresql.md
new file mode 100644
index 00000000..1e2fe8fa
--- /dev/null
+++ b/docs/3.12/helm-charts/getting-started-scalardb-analytics-postgresql.md
@@ -0,0 +1,510 @@
+# Getting Started with Helm Charts (ScalarDB Analytics with PostgreSQL)
+
+This guide explains how to get started with ScalarDB Analytics with PostgreSQL by using a Helm Chart in a Kubernetes cluster as a test environment. In addition, the contents of this guide assume that you already have a Mac or Linux environment set up for testing. Although **minikube** is mentioned, the steps described should work in any Kubernetes cluster.
+
+## What you will create
+
+You will deploy the following components in a Kubernetes cluster:
+
+```
++-------------------------------------------------------------------------------------------------------------------------------------------+
+| [Kubernetes cluster] |
+| |
+| [Pod] [Pod] [Pod] |
+| |
+| +------------------------------------+ |
+| +---> | ScalarDB Analytics with PostgreSQL | ---+ +-----------------------------+ |
+| | +------------------------------------+ | +---> | MySQL ("customer" schema) | <---+ |
+| | | | +-----------------------------+ | |
+| +-------------+ +---------+ | +------------------------------------+ | | | |
+| | OLAP client | ---> | Service | ---+---> | ScalarDB Analytics with PostgreSQL | ---+---+ +---+ |
+| +-------------+ +---------+ | +------------------------------------+ | | | | |
+| | | | +-----------------------------+ | | |
+| | +------------------------------------+ | +---> | PostgreSQL ("order" schema) | <---+ | |
+| +---> | ScalarDB Analytics with PostgreSQL | ---+ +-----------------------------+ | |
+| +------------------------------------+ | |
+| | |
+| +-------------+ | |
+| | OLTP client | ---(Load sample data with a test OLTP workload)-----------------------------------------------------------------------+ |
+| +-------------+ |
+| |
++-------------------------------------------------------------------------------------------------------------------------------------------+
+```
+
+## Step 1. Start a Kubernetes cluster
+
+First, you need to prepare a Kubernetes cluster. If you're using a **minikube** environment, please refer to the [Getting Started with Scalar Helm Charts](./getting-started-scalar-helm-charts.md). If you have already started a Kubernetes cluster, you can skip this step.
+
+## Step 2. Start MySQL and PostgreSQL pods
+
+ScalarDB, including ScalarDB Analytics with PostgreSQL, can use several types of database systems as its backend. In this guide, you will use MySQL and PostgreSQL.
+
+You can deploy MySQL and PostgreSQL on the Kubernetes cluster as follows:
+
+1. Add the Bitnami helm repository.
+
+ ```console
+ helm repo add bitnami https://charts.bitnami.com/bitnami
+ ```
+
+1. Update the helm repository.
+
+ ```console
+ helm repo update bitnami
+ ```
+
+1. Deploy MySQL.
+
+ ```console
+ helm install mysql-scalardb bitnami/mysql \
+ --set auth.rootPassword=mysql \
+ --set primary.persistence.enabled=false
+ ```
+
+1. Deploy PostgreSQL.
+
+ ```console
+ helm install postgresql-scalardb bitnami/postgresql \
+ --set auth.postgresPassword=postgres \
+ --set primary.persistence.enabled=false
+ ```
+
+1. Check if the MySQL and PostgreSQL pods are running.
+
+ ```console
+ kubectl get pod
+ ```
+
+ You should see the following output:
+
+ ```console
+ $ kubectl get pod
+ NAME READY STATUS RESTARTS AGE
+ mysql-scalardb-0 1/1 Running 0 3m17s
+ postgresql-scalardb-0 1/1 Running 0 3m12s
+ ```
+
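+1. (Optional) Verify that MySQL accepts connections. This quick check assumes the `mysql` client that is bundled in the Bitnami image and uses the root password set during deployment (`mysql`).
+
+   ```console
+   kubectl exec mysql-scalardb-0 -- mysql -uroot -pmysql -e "SELECT 1"
+   ```
+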
+## Step 3. Create a working directory
+
+Since you'll be creating some configuration files locally, create a working directory for those files.
+
+```console
+mkdir -p ~/scalardb-analytics-postgresql-test/
+```
+
+## Step 4. Set the versions of ScalarDB, ScalarDB Analytics with PostgreSQL, and the chart
+
+Set the following three environment variables. If you want to use other versions of ScalarDB and ScalarDB Analytics with PostgreSQL, be sure to set the variables to the versions that you want to use.
+
+{% capture notice--info %}
+**Note**
+
+You must use the same minor versions (for example, 3.10.x) of ScalarDB Analytics with PostgreSQL as ScalarDB, but you don't need to make the patch versions match. For example, you can use ScalarDB 3.10.1 and ScalarDB Analytics with PostgreSQL 3.10.3 together.
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+```console
+SCALARDB_VERSION=3.10.1
+```
+
+```console
+SCALARDB_ANALYTICS_WITH_POSTGRESQL_VERSION=3.10.3
+```
+
+```console
+CHART_VERSION=$(helm search repo scalar-labs/scalardb-analytics-postgresql -l | grep -e ${SCALARDB_ANALYTICS_WITH_POSTGRESQL_VERSION} | awk '{print $2}' | sort --version-sort -r | head -n 1)
+```
+
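+Note that the `CHART_VERSION` command above assumes that the `scalar-labs` Helm repository has already been added (`helm repo add scalar-labs https://scalar-labs.github.io/helm-charts`). After setting the variables, you can confirm that a chart version was resolved; the output should not be empty.
+
+```console
+echo ${CHART_VERSION}
+```
+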
+## Step 5. Run OLTP transactions to load sample data to MySQL and PostgreSQL
+
+Before deploying ScalarDB Analytics with PostgreSQL, run the OLTP transactions to create sample data.
+
+1. Start an OLTP client pod in the Kubernetes cluster.
+
+ ```console
+ kubectl run oltp-client --image eclipse-temurin:8-jdk-jammy --env SCALARDB_VERSION=${SCALARDB_VERSION} -- sleep inf
+ ```
+
+1. Check if the OLTP client pod is running.
+
+ ```console
+ kubectl get pod oltp-client
+ ```
+
+ You should see the following output:
+
+ ```console
+ $ kubectl get pod oltp-client
+ NAME READY STATUS RESTARTS AGE
+ oltp-client 1/1 Running 0 17s
+ ```
+
+1. Run bash in the OLTP client pod.
+
+ ```console
+ kubectl exec -it oltp-client -- bash
+ ```
+
+ After this step, run each command in the OLTP client pod.
+
+1. Install the git and curl commands in the OLTP client pod.
+
+ ```console
+ apt update && apt install -y curl git
+ ```
+
+1. Clone the ScalarDB samples repository.
+
+ ```console
+ git clone https://github.com/scalar-labs/scalardb-samples.git
+ ```
+
+1. Go to the directory `scalardb-samples/multi-storage-transaction-sample/`.
+
+ ```console
+ cd scalardb-samples/multi-storage-transaction-sample/
+ ```
+
+ ```console
+ pwd
+ ```
+
+ You should see the following output:
+
+ ```console
+ # pwd
+ /scalardb-samples/multi-storage-transaction-sample
+ ```
+
+1. Create a configuration file (`database.properties`) to access MySQL and PostgreSQL in the Kubernetes cluster.
+
+ ```console
+ cat << 'EOF' > database.properties
+ scalar.db.storage=multi-storage
+ scalar.db.multi_storage.storages=storage0,storage1
+
+ # Storage 0
+ scalar.db.multi_storage.storages.storage0.storage=jdbc
+ scalar.db.multi_storage.storages.storage0.contact_points=jdbc:mysql://mysql-scalardb.default.svc.cluster.local:3306/
+ scalar.db.multi_storage.storages.storage0.username=root
+ scalar.db.multi_storage.storages.storage0.password=mysql
+
+ # Storage 1
+ scalar.db.multi_storage.storages.storage1.storage=jdbc
+ scalar.db.multi_storage.storages.storage1.contact_points=jdbc:postgresql://postgresql-scalardb.default.svc.cluster.local:5432/postgres
+ scalar.db.multi_storage.storages.storage1.username=postgres
+ scalar.db.multi_storage.storages.storage1.password=postgres
+
+ scalar.db.multi_storage.namespace_mapping=customer:storage0,order:storage1
+ scalar.db.multi_storage.default_storage=storage1
+ EOF
+ ```
+
+1. Download Schema Loader from [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases).
+
+ ```console
+ curl -OL https://github.com/scalar-labs/scalardb/releases/download/v${SCALARDB_VERSION}/scalardb-schema-loader-${SCALARDB_VERSION}.jar
+ ```
+
+1. Run Schema Loader to create sample tables.
+
+ ```console
+ java -jar scalardb-schema-loader-${SCALARDB_VERSION}.jar --config database.properties --schema-file schema.json --coordinator
+ ```
+
+1. Load initial data for the sample workload.
+
+ ```console
+ ./gradlew run --args="LoadInitialData"
+ ```
+
+1. Run the sample workload of OLTP transactions. Running these commands will create several `order` entries as sample data.
+
+ ```console
+ ./gradlew run --args="PlaceOrder 1 1:3,2:2"
+ ```
+
+ ```console
+ ./gradlew run --args="PlaceOrder 1 5:1"
+ ```
+
+ ```console
+ ./gradlew run --args="PlaceOrder 2 3:1,4:1"
+ ```
+
+ ```console
+ ./gradlew run --args="PlaceOrder 2 2:1"
+ ```
+
+ ```console
+ ./gradlew run --args="PlaceOrder 3 1:1"
+ ```
+
+ ```console
+ ./gradlew run --args="PlaceOrder 3 2:1"
+ ```
+
+ ```console
+ ./gradlew run --args="PlaceOrder 3 3:1"
+ ```
+
+ ```console
+ ./gradlew run --args="PlaceOrder 3 5:1"
+ ```
+
+1. Exit from the OLTP client pod.
+
+ ```console
+ exit
+ ```
+
+## Step 6. Deploy ScalarDB Analytics with PostgreSQL
+
+After creating sample data via ScalarDB in the backend databases, deploy ScalarDB Analytics with PostgreSQL.
+
+1. Create a custom values file for ScalarDB Analytics with PostgreSQL (`scalardb-analytics-postgresql-custom-values.yaml`).
+
+ ```console
+ cat << 'EOF' > ~/scalardb-analytics-postgresql-test/scalardb-analytics-postgresql-custom-values.yaml
+ scalardbAnalyticsPostgreSQL:
+ databaseProperties: |
+ scalar.db.storage=multi-storage
+ scalar.db.multi_storage.storages=storage0,storage1
+
+ # Storage 0
+ scalar.db.multi_storage.storages.storage0.storage=jdbc
+ scalar.db.multi_storage.storages.storage0.contact_points=jdbc:mysql://mysql-scalardb.default.svc.cluster.local:3306/
+ scalar.db.multi_storage.storages.storage0.username=root
+ scalar.db.multi_storage.storages.storage0.password=mysql
+
+ # Storage 1
+ scalar.db.multi_storage.storages.storage1.storage=jdbc
+ scalar.db.multi_storage.storages.storage1.contact_points=jdbc:postgresql://postgresql-scalardb.default.svc.cluster.local:5432/postgres
+ scalar.db.multi_storage.storages.storage1.username=postgres
+ scalar.db.multi_storage.storages.storage1.password=postgres
+
+ scalar.db.multi_storage.namespace_mapping=customer:storage0,order:storage1
+ scalar.db.multi_storage.default_storage=storage1
+ schemaImporter:
+ namespaces:
+ - customer
+ - order
+ EOF
+ ```
+
+1. Create a secret resource to set a superuser password for PostgreSQL.
+
+ ```console
+ kubectl create secret generic scalardb-analytics-postgresql-superuser-password --from-literal=superuser-password=scalardb-analytics
+ ```
+
+1. Deploy ScalarDB Analytics with PostgreSQL.
+
+ ```console
+ helm install scalardb-analytics-postgresql scalar-labs/scalardb-analytics-postgresql -n default -f ~/scalardb-analytics-postgresql-test/scalardb-analytics-postgresql-custom-values.yaml --version ${CHART_VERSION}
+ ```
+
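+1. (Optional) Check that the ScalarDB Analytics with PostgreSQL pods are running. The pod names are generated by the chart, so they will differ in your environment.
+
+   ```console
+   kubectl get pod
+   ```
+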
+## Step 7. Run an OLAP client pod
+
+To run some queries via ScalarDB Analytics with PostgreSQL, run an OLAP client pod.
+
+1. Start an OLAP client pod in the Kubernetes cluster.
+
+ ```console
+ kubectl run olap-client --image postgres:latest -- sleep inf
+ ```
+
+1. Check if the OLAP client pod is running.
+
+ ```console
+ kubectl get pod olap-client
+ ```
+
+ You should see the following output:
+
+ ```console
+ $ kubectl get pod olap-client
+ NAME READY STATUS RESTARTS AGE
+ olap-client 1/1 Running 0 10s
+ ```
+
+## Step 8. Run sample queries via ScalarDB Analytics with PostgreSQL
+
+After running the OLAP client pod, you can run some queries via ScalarDB Analytics with PostgreSQL.
+
+1. Run bash in the OLAP client pod.
+
+ ```console
+ kubectl exec -it olap-client -- bash
+ ```
+
+ After this step, run each command in the OLAP client pod.
+
+1. Run the psql command to access ScalarDB Analytics with PostgreSQL.
+
+ ```console
+ psql -h scalardb-analytics-postgresql -p 5432 -U postgres -d scalardb
+ ```
+
+   The password is `scalardb-analytics`. (A non-interactive variant that passes the password via an environment variable is shown after this list.)
+
+1. Read sample data in the `customer.customers` table.
+
+ ```sql
+ SELECT * FROM customer.customers;
+ ```
+
+ You should see the following output:
+
+ ```sql
+ customer_id | name | credit_limit | credit_total
+ -------------+---------------+--------------+--------------
+ 1 | Yamada Taro | 10000 | 10000
+ 2 | Yamada Hanako | 10000 | 9500
+ 3 | Suzuki Ichiro | 10000 | 8500
+ (3 rows)
+ ```
+
+1. Read sample data in the `order.orders` table.
+
+ ```sql
+ SELECT * FROM "order".orders;
+ ```
+
+ You should see the following output:
+
+ ```sql
+ scalardb=# SELECT * FROM "order".orders;
+ customer_id | timestamp | order_id
+ -------------+---------------+--------------------------------------
+ 1 | 1700124015601 | 5ae2a41b-990d-4a16-9700-39355e29adf8
+ 1 | 1700124021273 | f3f23d93-3862-48be-8a57-8368b7c8689e
+ 2 | 1700124028182 | 696a895a-8998-4c3b-b112-4d5763bfcfd8
+ 2 | 1700124036158 | 9215d63a-a9a2-4471-a990-45897f091ca5
+ 3 | 1700124043744 | 9be70cd4-4f93-4753-9d89-68e250b2ac51
+ 3 | 1700124051162 | 4e8ce2d2-488c-40d6-aa52-d9ecabfc68a8
+ 3 | 1700124058096 | 658b6682-2819-41f2-91ee-2802a1f02857
+ 3 | 1700124071240 | 4e2f94f4-53ec-4570-af98-7c648d8ed80f
+ (8 rows)
+ ```
+
+1. Read sample data in the `order.statements` table.
+
+ ```sql
+ SELECT * FROM "order".statements;
+ ```
+
+ You should see the following output:
+
+ ```sql
+ scalardb=# SELECT * FROM "order".statements;
+ order_id | item_id | count
+ --------------------------------------+---------+-------
+ 5ae2a41b-990d-4a16-9700-39355e29adf8 | 2 | 2
+ 5ae2a41b-990d-4a16-9700-39355e29adf8 | 1 | 3
+ f3f23d93-3862-48be-8a57-8368b7c8689e | 5 | 1
+ 696a895a-8998-4c3b-b112-4d5763bfcfd8 | 4 | 1
+ 696a895a-8998-4c3b-b112-4d5763bfcfd8 | 3 | 1
+ 9215d63a-a9a2-4471-a990-45897f091ca5 | 2 | 1
+ 9be70cd4-4f93-4753-9d89-68e250b2ac51 | 1 | 1
+ 4e8ce2d2-488c-40d6-aa52-d9ecabfc68a8 | 2 | 1
+ 658b6682-2819-41f2-91ee-2802a1f02857 | 3 | 1
+ 4e2f94f4-53ec-4570-af98-7c648d8ed80f | 5 | 1
+ (10 rows)
+ ```
+
+1. Read sample data in the `order.items` table.
+
+ ```sql
+ SELECT * FROM "order".items;
+ ```
+
+ You should see the following output:
+
+ ```sql
+ scalardb=# SELECT * FROM "order".items;
+ item_id | name | price
+ ---------+--------+-------
+ 5 | Melon | 3000
+ 2 | Orange | 2000
+ 4 | Mango | 5000
+ 1 | Apple | 1000
+ 3 | Grape | 2500
+ (5 rows)
+ ```
+
+1. Run a `JOIN` query. For example, you can see the remaining credit of each customer as follows.
+
+ ```sql
+ SELECT * FROM (
+ SELECT c.name, c.credit_limit - c.credit_total AS remaining, array_agg(i.name) OVER (PARTITION BY c.name) AS items
+ FROM "order".orders o
+ JOIN customer.customers c ON o.customer_id = c.customer_id
+ JOIN "order".statements s ON o.order_id = s.order_id
+ JOIN "order".items i ON s.item_id = i.item_id
+ ) AS remaining_info GROUP BY name, remaining, items;
+ ```
+
+ You should see the following output:
+
+ ```sql
+ scalardb=# SELECT * FROM (
+ scalardb(# SELECT c.name, c.credit_limit - c.credit_total AS remaining, array_agg(i.name) OVER (PARTITION BY c.name) AS items
+ scalardb(# FROM "order".orders o
+ scalardb(# JOIN customer.customers c ON o.customer_id = c.customer_id
+ scalardb(# JOIN "order".statements s ON o.order_id = s.order_id
+ scalardb(# JOIN "order".items i ON s.item_id = i.item_id
+ scalardb(# ) AS remaining_info GROUP BY name, remaining, items;
+ name | remaining | items
+ ---------------+-----------+----------------------------
+ Suzuki Ichiro | 1500 | {Grape,Orange,Apple,Melon}
+ Yamada Hanako | 500 | {Orange,Grape,Mango}
+ Yamada Taro | 0 | {Orange,Melon,Apple}
+ (3 rows)
+ ```
+
+1. Exit from the psql command.
+
+ ```console
+ \q
+ ```
+
+1. Exit from the OLAP client pod.
+
+ ```console
+ exit
+ ```
+
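+As mentioned above, you can also run queries non-interactively by passing the password through the standard `PGPASSWORD` environment variable and specifying a single query with `-c`. For example:
+
+```console
+kubectl exec olap-client -- env PGPASSWORD=scalardb-analytics psql -h scalardb-analytics-postgresql -p 5432 -U postgres -d scalardb -c "SELECT * FROM customer.customers;"
+```
+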
+## Step 9. Delete all resources
+
+After completing the ScalarDB Analytics with PostgreSQL tests on the Kubernetes cluster, remove all resources.
+
+1. Uninstall MySQL, PostgreSQL, and ScalarDB Analytics with PostgreSQL.
+
+ ```console
+ helm uninstall mysql-scalardb postgresql-scalardb scalardb-analytics-postgresql
+ ```
+
+1. Remove the client pods.
+
+ ```console
+ kubectl delete pod oltp-client olap-client --grace-period 0
+ ```
+
+1. Remove the secret resource.
+
+ ```console
+ kubectl delete secrets scalardb-analytics-postgresql-superuser-password
+ ```
+
+1. Remove the working directory and sample files.
+
+ ```console
+ cd ~
+ ```
+
+ ```console
+ rm -rf ~/scalardb-analytics-postgresql-test/
+ ```
diff --git a/docs/3.12/helm-charts/getting-started-scalardb.md b/docs/3.12/helm-charts/getting-started-scalardb.md
new file mode 100644
index 00000000..773ca553
--- /dev/null
+++ b/docs/3.12/helm-charts/getting-started-scalardb.md
@@ -0,0 +1,384 @@
+# [Deprecated] Getting Started with Helm Charts (ScalarDB Server)
+
+{% capture notice--info %}
+**Note**
+
+ScalarDB Server is now deprecated. Please use [ScalarDB Cluster](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/setup-scalardb-cluster-on-kubernetes-by-using-helm-chart.md) instead.
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+This document explains how to get started with ScalarDB Server using a Helm Chart on a Kubernetes cluster as a test environment. Here, we assume that you already have a Mac or Linux environment for testing. We use **minikube** in this document, but the steps we will show should work in any Kubernetes cluster.
+
+## Requirement
+
+* You need to subscribe to ScalarDB in the [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-rzbuhxgvqf4d2) or [Azure Marketplace](https://azuremarketplace.microsoft.com/en/marketplace/apps/scalarinc.scalardb) to get container images (`scalardb-server` and `scalardb-envoy`). Please refer to the following documents for more details.
+ * [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md)
+ * [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md)
+
+## What we create
+
+We will deploy the following components on a Kubernetes cluster.
+
+```
++--------------------------------------------------------------------------------------------------------------------------------------+
+| [Kubernetes Cluster] |
+| |
+| [Pod] [Pod] [Pod] [Pod] |
+| |
+| +-------+ +-----------------+ |
+| +---> | Envoy | ---+ +---> | ScalarDB Server | ---+ |
+| | +-------+ | | +-----------------+ | |
+| | | | | |
+| +--------+ +---------+ | +-------+ | +-------------------+ | +-----------------+ | +------------+ |
+| | Client | ---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | ScalarDB Server | ---+---> | PostgreSQL | |
+| +--------+ | (Envoy) | | +-------+ | | (ScalarDB Server) | | +-----------------+ | +------------+ |
+| +---------+ | | +-------------------+ | | |
+| | +-------+ | | +-----------------+ | |
+| +---> | Envoy | ---+ +---> | ScalarDB Server | ---+ |
+| +-------+ +-----------------+ |
+| |
++--------------------------------------------------------------------------------------------------------------------------------------+
+```
+
+## Step 1. Start a Kubernetes cluster
+
+First, you need to prepare a Kubernetes cluster. If you use a **minikube** environment, please refer to the [Getting Started with Scalar Helm Charts](./getting-started-scalar-helm-charts.md). If you have already started a Kubernetes cluster, you can skip this step.
+
+## Step 2. Start a PostgreSQL container
+
+ScalarDB uses a database system as its backend database. In this document, we use PostgreSQL.
+
+You can deploy PostgreSQL on the Kubernetes cluster as follows.
+
+1. Add the Bitnami helm repository.
+ ```console
+ helm repo add bitnami https://charts.bitnami.com/bitnami
+ ```
+
+1. Deploy PostgreSQL.
+ ```console
+ helm install postgresql-scalardb bitnami/postgresql \
+ --set auth.postgresPassword=postgres \
+ --set primary.persistence.enabled=false
+ ```
+
+1. Check if the PostgreSQL container is running.
+ ```console
+ kubectl get pod
+ ```
+ [Command execution result]
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-scalardb-0 1/1 Running 0 2m42s
+ ```
+
+## Step 3. Deploy ScalarDB Server on the Kubernetes cluster using Helm Charts
+
+1. Add the Scalar helm repository.
+ ```console
+ helm repo add scalar-labs https://scalar-labs.github.io/helm-charts
+ ```
+
+1. Create a secret resource to pull the ScalarDB container images from AWS/Azure Marketplace.
+ * AWS Marketplace
+ ```console
+ kubectl create secret docker-registry reg-ecr-mp-secrets \
+ --docker-server=709825985650.dkr.ecr.us-east-1.amazonaws.com \
+ --docker-username=AWS \
+ --docker-password=$(aws ecr get-login-password --region us-east-1)
+ ```
+ * Azure Marketplace
+ ```console
+ kubectl create secret docker-registry reg-acr-secrets \
+ --docker-server= \
+ --docker-username= \
+ --docker-password=
+ ```
+
+ Please refer to the following documents for more details.
+
+ * [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md)
+ * [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md)
+
+1. Create a custom values file for ScalarDB Server (scalardb-custom-values.yaml).
+ * AWS Marketplace
+
+ {% raw %}
+ ```console
+ cat << 'EOF' > scalardb-custom-values.yaml
+ envoy:
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardb-envoy"
+ version: "1.3.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+
+ scalardb:
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardb-server"
+ tag: "3.7.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+ databaseProperties: |
+ scalar.db.storage=jdbc
+ scalar.db.contact_points=jdbc:postgresql://postgresql-scalardb.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DB_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DB_POSTGRES_PASSWORD "" }}
+ secretName: "scalardb-credentials-secret"
+ EOF
+ ```
+ {% endraw %}
+
+ * Azure Marketplace
+
+ {% raw %}
+ ```console
+ cat << 'EOF' > scalardb-custom-values.yaml
+ envoy:
+ image:
+ repository: "/scalarinc/scalardb-envoy"
+ version: "1.3.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+
+ scalardb:
+ image:
+ repository: "/scalarinc/scalardb-server"
+ tag: "3.7.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+ databaseProperties: |
+ scalar.db.storage=jdbc
+ scalar.db.contact_points=jdbc:postgresql://postgresql-scalardb.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DB_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DB_POSTGRES_PASSWORD "" }}
+ secretName: "scalardb-credentials-secret"
+ EOF
+ ```
+ {% endraw %}
+
+1. Create a Secret resource that includes a username and password for PostgreSQL.
+ ```console
+ kubectl create secret generic scalardb-credentials-secret \
+ --from-literal=SCALAR_DB_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DB_POSTGRES_PASSWORD=postgres
+ ```
+
+1. Deploy ScalarDB Server.
+ ```console
+ helm install scalardb scalar-labs/scalardb -f ./scalardb-custom-values.yaml
+ ```
+
+1. Check if the ScalarDB Server pods are deployed.
+ ```console
+ kubectl get pod
+ ```
+ [Command execution result]
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-scalardb-0 1/1 Running 0 9m48s
+ scalardb-765598848b-75csp 1/1 Running 0 6s
+ scalardb-765598848b-w864f 1/1 Running 0 6s
+ scalardb-765598848b-x8rqj 1/1 Running 0 6s
+ scalardb-envoy-84c475f77b-kpz2p 1/1 Running 0 6s
+ scalardb-envoy-84c475f77b-n74tk 1/1 Running 0 6s
+ scalardb-envoy-84c475f77b-zbrwz 1/1 Running 0 6s
+ ```
+   If the ScalarDB Server pods are deployed properly, you can see that their STATUS is **Running**.
+
+1. Check if the ScalarDB Server services are deployed.
+ ```console
+ kubectl get svc
+ ```
+ [Command execution result]
+ ```console
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ kubernetes ClusterIP 10.96.0.1 443/TCP 47d
+ postgresql-scalardb ClusterIP 10.109.118.122 5432/TCP 10m
+ postgresql-scalardb-hl ClusterIP None 5432/TCP 10m
+ scalardb-envoy ClusterIP 10.110.110.250 60051/TCP 41s
+ scalardb-envoy-metrics ClusterIP 10.107.98.227 9001/TCP 41s
+ scalardb-headless ClusterIP None 60051/TCP 41s
+ scalardb-metrics ClusterIP 10.108.188.10 8080/TCP 41s
+ ```
+ If the ScalarDB Server services are deployed properly, you can see private IP addresses in the CLUSTER-IP column. (Note: `scalardb-headless` has no CLUSTER-IP.)
+
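+1. (Optional) Confirm that the Envoy service has endpoints behind it, which indicates that the ScalarDB Server pods have been registered with the service properly.
+   ```console
+   kubectl get endpoints scalardb-envoy
+   ```
+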
+## Step 4. Start a Client container
+
+1. Start a Client container on the Kubernetes cluster.
+ ```console
+   kubectl run scalardb-client --image eclipse-temurin:8 --command -- sleep inf
+ ```
+
+1. Check if the Client container is running.
+ ```console
+ kubectl get pod scalardb-client
+ ```
+ [Command execution result]
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ scalardb-client 1/1 Running 0 23s
+ ```
+
+## Step 5. Run ScalarDB sample applications in the Client container
+
+The following explains the minimum steps. If you want to know more details about ScalarDB, please refer to the [Getting Started with ScalarDB](https://github.com/scalar-labs/scalardb/blob/master/docs/getting-started-with-scalardb.md).
+
+1. Run bash in the Client container.
+ ```console
+ kubectl exec -it scalardb-client -- bash
+ ```
+ After this step, run each command in the Client container.
+
+1. Install the git and curl commands in the Client container.
+ ```console
+ apt update && apt install -y git curl
+ ```
+
+1. Clone the ScalarDB git repository.
+ ```console
+ git clone https://github.com/scalar-labs/scalardb.git
+ ```
+
+1. Change the directory to `scalardb/`.
+ ```console
+ cd scalardb/
+ ```
+ ```console
+ pwd
+ ```
+ [Command execution result]
+ ```console
+ /scalardb
+ ```
+
+1. Switch the branch to the version that you want to use.
+ ```console
+ git checkout -b v3.7.0 refs/tags/v3.7.0
+ ```
+ ```console
+ git branch
+ ```
+ [Command execution result]
+
+ {% raw %}
+ ```console
+ master
+ * v3.7.0
+ ```
+ {% endraw %}
+
+ If you want to use another version, please specify the version (tag) you want to use.
+
+1. Change the directory to `docs/getting-started/`.
+ ```console
+ cd docs/getting-started/
+ ```
+ ```console
+ pwd
+ ```
+ [Command execution result]
+ ```console
+ /scalardb/docs/getting-started
+ ```
+
+1. Download Schema Loader from [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases).
+ ```console
+ curl -OL https://github.com/scalar-labs/scalardb/releases/download/v3.7.0/scalardb-schema-loader-3.7.0.jar
+ ```
+ You need to use the same version of ScalarDB and Schema Loader.
+
+1. Create a configuration file (scalardb.properties) to access ScalarDB Server on the Kubernetes cluster.
+ ```console
+ cat << 'EOF' > scalardb.properties
+ scalar.db.contact_points=scalardb-envoy.default.svc.cluster.local
+ scalar.db.contact_port=60051
+ scalar.db.storage=grpc
+ scalar.db.transaction_manager=grpc
+ EOF
+ ```
+
+1. Create a JSON file (emoney-transaction.json) that defines the DB schema for the sample applications.
+ ```console
+ cat << 'EOF' > emoney-transaction.json
+ {
+ "emoney.account": {
+ "transaction": true,
+ "partition-key": [
+ "id"
+ ],
+ "clustering-key": [],
+ "columns": {
+ "id": "TEXT",
+ "balance": "INT"
+ }
+ }
+ }
+ EOF
+ ```
+
+1. Run Schema Loader to create the sample table.
+ ```console
+ java -jar ./scalardb-schema-loader-3.7.0.jar --config ./scalardb.properties -f emoney-transaction.json --coordinator
+ ```
+
+1. Run the sample applications.
+ * Charge `1000` to `user1`:
+ ```console
+ ./gradlew run --args="-action charge -amount 1000 -to user1"
+ ```
+ * Charge `0` to `merchant1` (Just create an account for `merchant1`):
+ ```console
+ ./gradlew run --args="-action charge -amount 0 -to merchant1"
+ ```
+ * Pay `100` from `user1` to `merchant1`:
+ ```console
+ ./gradlew run --args="-action pay -amount 100 -from user1 -to merchant1"
+ ```
+ * Get the balance of `user1`:
+ ```console
+ ./gradlew run --args="-action getBalance -id user1"
+ ```
+ * Get the balance of `merchant1`:
+ ```console
+ ./gradlew run --args="-action getBalance -id merchant1"
+ ```
+
+1. (Optional) You can see the data inserted and modified (INSERT/UPDATE) by the sample applications using the following command. (This command needs to run on your localhost, not in the Client container.)
+ ```console
+ kubectl exec -it postgresql-scalardb-0 -- bash -c 'export PGPASSWORD=postgres && psql -U postgres -d postgres -c "SELECT * FROM emoney.account"'
+ ```
+ [Command execution result]
+ ```sql
+ id | balance | tx_id | tx_state | tx_version | tx_prepared_at | tx_committed_at | before_tx_id | before_tx_state | before_tx_version | before_tx_prepared_at | before_tx_committed_at | before_balance
+ -----------+---------+--------------------------------------+----------+------------+----------------+-----------------+--------------------------------------+-----------------+-------------------+-----------------------+------------------------+----------------
+ merchant1 | 100 | 65a90225-0846-4e97-b729-151f76f6ca2f | 3 | 2 | 1667361909634 |1667361909679 | 3633df99-a8ed-4301-a8b9-db1344807d7b | 3 | 1 | 1667361902466 | 1667361902485 | 0
+ user1 | 900 | 65a90225-0846-4e97-b729-151f76f6ca2f | 3 | 2 | 1667361909634 |1667361909679 | 5520cba4-625a-4886-b81f-6089bf846d18 | 3 | 1 | 1667361897283 | 1667361897317 | 1000
+ (2 rows)
+ ```
+    * Note:
+        * Usually, you should access data (records) through ScalarDB. The above command is used only to explain and confirm that the sample applications are working. (As expected, the balance of `user1` is 1000 - 100 = 900, and the balance of `merchant1` is 0 + 100 = 100.)
+
+## Step 6. Delete all resources
+
+After completing the ScalarDB Server tests on the Kubernetes cluster, remove all resources.
+
+1. Uninstall ScalarDB Server and PostgreSQL.
+ ```console
+ helm uninstall scalardb postgresql-scalardb
+ ```
+
+1. Remove the Client container.
+
+ ```console
+ kubectl delete pod scalardb-client --force --grace-period 0
+ ```
+
+## Further reading
+
+You can see how to get started with monitoring or logging for Scalar products in the following documents.
+
+* [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](./getting-started-monitoring.md)
+* [Getting Started with Helm Charts (Logging using Loki Stack)](./getting-started-logging.md)
+* [Getting Started with Helm Charts (Scalar Manager)](./getting-started-scalar-manager.md)
diff --git a/docs/3.12/helm-charts/getting-started-scalardl-auditor.md b/docs/3.12/helm-charts/getting-started-scalardl-auditor.md
new file mode 100644
index 00000000..8ef9da73
--- /dev/null
+++ b/docs/3.12/helm-charts/getting-started-scalardl-auditor.md
@@ -0,0 +1,1009 @@
+# Getting Started with Helm Charts (ScalarDL Ledger and Auditor / Auditor mode)
+
+This document explains how to get started with ScalarDL Ledger and Auditor using a Helm Chart on a Kubernetes cluster as a test environment. Here, we assume that you already have a Mac or Linux environment for testing. We use **minikube** in this document, but the steps we will show should work in any Kubernetes cluster.
+
+## Requirement
+
+You need to subscribe to ScalarDL Ledger and ScalarDL Auditor in the [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-rzbuhxgvqf4d2) or [Azure Marketplace](https://azuremarketplace.microsoft.com/en/marketplace/apps/scalarinc.scalardb) to get the following container images.
+
+ * AWS Marketplace
+ * scalar-ledger
+ * scalar-ledger-envoy
+ * scalardl-schema-loader-ledger
+ * scalar-auditor
+ * scalar-auditor-envoy
+ * scalardl-schema-loader-auditor
+ * Azure Marketplace
+ * scalar-ledger
+ * scalar-auditor
+ * scalardl-envoy
+ * scalardl-schema-loader
+
+Please refer to the following documents for more details.
+
+ * [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md)
+ * [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md)
+
+## Note
+
+To make Byzantine fault detection with auditing work properly, Ledger and Auditor should be deployed and managed in different administrative domains. However, in this guide, we will deploy Ledger and Auditor in the same Kubernetes cluster to make the test easier.
+
+## What we create
+
+We will deploy the following components on a Kubernetes cluster.
+
+```
++-----------------------------------------------------------------------------------------------------------------------------+
+| [Kubernetes Cluster] |
+| [Pod] [Pod] [Pod] |
+| |
+| +-------+ +---------+ |
+| +---> | Envoy | ---+ +---> | Ledger | ---+ |
+| | +-------+ | | +---------+ | |
+| | | | | |
+| +---------+ | +-------+ | +-----------+ | +---------+ | +---------------+ |
+| +---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | Ledger | ---+---> | PostgreSQL | |
+| | | (Envoy) | | +-------+ | | (Ledger) | | +---------+ | | (For Ledger) | |
+| | +---------+ | | +-----------+ | | +---------------+ |
+| | | +-------+ | | +---------+ | |
+| | +---> | Envoy | ---+ +---> | Ledger | ---+ |
+| +--------+ | +-------+ +---------+ |
+| | Client | ---+ |
+| +--------+ | +-------+ +---------+ |
+| | +---> | Envoy | ---+ +---> | Auditor | ---+ |
+| | | +-------+ | | +---------+ | |
+| | | | | | |
+| | +---------+ | +-------+ | +-----------+ | +---------+ | +---------------+ |
+| +---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | Auditor | ---+---> | PostgreSQL | |
+| | (Envoy) | | +-------+ | | (Auditor) | | +---------+ | | (For Auditor) | |
+| +---------+ | | +-----------+ | | +---------------+ |
+| | +-------+ | | +---------+ | |
+| +---> | Envoy | ---+ +---> | Auditor | ---+ |
+| +-------+ +---------+ |
+| |
++-----------------------------------------------------------------------------------------------------------------------------+
+```
+
+## Step 1. Start a Kubernetes cluster
+
+First, you need to prepare a Kubernetes cluster. If you use a **minikube** environment, please refer to the [Getting Started with Scalar Helm Charts](./getting-started-scalar-helm-charts.md). If you have already started a Kubernetes cluster, you can skip this step.
+
+## Step 2. Start PostgreSQL containers
+
+ScalarDL Ledger and Auditor use a database system as their backend database. In this document, we use PostgreSQL.
+
+You can deploy PostgreSQL on the Kubernetes cluster as follows.
+
+1. Add the Bitnami helm repository.
+
+ ```console
+ helm repo add bitnami https://charts.bitnami.com/bitnami
+ ```
+
+1. Deploy PostgreSQL for Ledger.
+
+ ```console
+ helm install postgresql-ledger bitnami/postgresql \
+ --set auth.postgresPassword=postgres \
+ --set primary.persistence.enabled=false
+ ```
+
+1. Deploy PostgreSQL for Auditor.
+
+ ```console
+ helm install postgresql-auditor bitnami/postgresql \
+ --set auth.postgresPassword=postgres \
+ --set primary.persistence.enabled=false
+ ```
+
+1. Check if the PostgreSQL containers are running.
+
+ ```console
+ kubectl get pod
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-auditor-0 1/1 Running 0 11s
+ postgresql-ledger-0 1/1 Running 0 16s
+ ```
+
+## Step 3. Create a working directory
+
+We will create some configuration files and key/certificate files locally, so create a working directory for them.
+
+1. Create a working directory.
+
+ ```console
+ mkdir -p ~/scalardl-test/certs/
+ ```
+
+## Step 4. Create key/certificate files
+
+Note: In this guide, we will use self-signed certificates for the test. However, it is strongly recommended that these certificates NOT be used in production.
+
+1. Change the working directory to `~/scalardl-test/certs/`.
+
+ ```console
+ cd ~/scalardl-test/certs/
+ ```
+
+1. Create a JSON file that includes Ledger information.
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/certs/ledger.json
+ {
+ "CN": "ledger",
+ "hosts": ["example.com","*.example.com"],
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "O": "ledger",
+ "OU": "test team",
+ "L": "Shinjuku",
+ "ST": "Tokyo",
+ "C": "JP"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create a JSON file that includes Auditor information.
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/certs/auditor.json
+ {
+ "CN": "auditor",
+ "hosts": ["example.com","*.example.com"],
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "O": "auditor",
+ "OU": "test team",
+ "L": "Shinjuku",
+ "ST": "Tokyo",
+ "C": "JP"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create a JSON file that includes Client information.
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/certs/client.json
+ {
+ "CN": "client",
+ "hosts": ["example.com","*.example.com"],
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "O": "client",
+ "OU": "test team",
+ "L": "Shinjuku",
+ "ST": "Tokyo",
+ "C": "JP"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create key/certificate files for the Ledger.
+
+ ```console
+ cfssl selfsign "" ./ledger.json | cfssljson -bare ledger
+ ```
+
+1. Create key/certificate files for the Auditor.
+
+ ```console
+ cfssl selfsign "" ./auditor.json | cfssljson -bare auditor
+ ```
+
+1. Create key/certificate files for the Client.
+
+ ```console
+ cfssl selfsign "" ./client.json | cfssljson -bare client
+ ```
+
+1. Confirm key/certificate files are created.
+
+ ```console
+ ls -1
+ ```
+
+ [Command execution result]
+
+ ```console
+ auditor-key.pem
+ auditor.csr
+ auditor.json
+ auditor.pem
+ client-key.pem
+ client.csr
+ client.json
+ client.pem
+ ledger-key.pem
+ ledger.csr
+ ledger.json
+ ledger.pem
+ ```
+
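+1. (Optional) Inspect a generated certificate. For example, assuming `openssl` is available on your machine, you can check the subject and validity period of the Ledger certificate as follows.
+
+   ```console
+   openssl x509 -in ledger.pem -noout -subject -dates
+   ```
+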
+## Step 5. Create DB schemas for ScalarDL Ledger and Auditor using Helm Charts
+
+We will deploy two ScalarDL Schema Loader pods on the Kubernetes cluster using Helm Charts.
+The ScalarDL Schema Loader will create the DB schemas for ScalarDL Ledger and Auditor in PostgreSQL.
+
+1. Change the working directory to `~/scalardl-test/`.
+
+ ```console
+ cd ~/scalardl-test/
+ ```
+
+1. Add the Scalar helm repository.
+
+ ```console
+ helm repo add scalar-labs https://scalar-labs.github.io/helm-charts
+ ```
+
+1. Create a secret resource to pull the ScalarDL container images from AWS/Azure Marketplace.
+ * AWS Marketplace
+
+ ```console
+ kubectl create secret docker-registry reg-ecr-mp-secrets \
+ --docker-server=709825985650.dkr.ecr.us-east-1.amazonaws.com \
+ --docker-username=AWS \
+ --docker-password=$(aws ecr get-login-password --region us-east-1)
+ ```
+
+ * Azure Marketplace
+
+ ```console
+ kubectl create secret docker-registry reg-acr-secrets \
+ --docker-server= \
+ --docker-username= \
+ --docker-password=
+ ```
+
+ Please refer to the following documents for more details.
+
+ * [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md)
+ * [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md)
+
+1. Create a custom values file for ScalarDL Schema Loader for Ledger (schema-loader-ledger-custom-values.yaml).
+ * AWS Marketplace
+
+ {% raw %}
+ ```console
+ cat << 'EOF' > ~/scalardl-test/schema-loader-ledger-custom-values.yaml
+ schemaLoading:
+ schemaType: "ledger"
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-schema-loader-ledger"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ secretName: "ledger-credentials-secret"
+ EOF
+ ```
+ {% endraw %}
+
+ * Azure Marketplace
+
+ {% raw %}
+ ```console
+ cat << 'EOF' > ~/scalardl-test/schema-loader-ledger-custom-values.yaml
+ schemaLoading:
+ schemaType: "ledger"
+ image:
+ repository: "/scalarinc/scalardl-schema-loader"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ secretName: "ledger-credentials-secret"
+ EOF
+ ```
+ {% endraw %}
+
+1. Create a custom values file for ScalarDL Schema Loader for Auditor (schema-loader-auditor-custom-values.yaml).
+ * AWS Marketplace
+
+ {% raw %}
+ ```console
+ cat << 'EOF' > ~/scalardl-test/schema-loader-auditor-custom-values.yaml
+ schemaLoading:
+ schemaType: "auditor"
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-schema-loader-auditor"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-auditor.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ secretName: "auditor-credentials-secret"
+ EOF
+ ```
+ {% endraw %}
+
+ * Azure Marketplace
+
+ {% raw %}
+ ```console
+ cat << 'EOF' > ~/scalardl-test/schema-loader-auditor-custom-values.yaml
+ schemaLoading:
+ schemaType: "auditor"
+ image:
+ repository: "/scalarinc/scalardl-schema-loader"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-auditor.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ secretName: "auditor-credentials-secret"
+ EOF
+ ```
+ {% endraw %}
+
+1. Create a secret resource that includes a username and password for PostgreSQL for Ledger.
+
+ ```console
+ kubectl create secret generic ledger-credentials-secret \
+ --from-literal=SCALAR_DL_LEDGER_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DL_LEDGER_POSTGRES_PASSWORD=postgres
+ ```
+
+1. Create a secret resource that includes a username and password for PostgreSQL for Auditor.
+
+ ```console
+ kubectl create secret generic auditor-credentials-secret \
+ --from-literal=SCALAR_DL_AUDITOR_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DL_AUDITOR_POSTGRES_PASSWORD=postgres
+ ```
+
+1. Deploy the ScalarDL Schema Loader for Ledger.
+
+ ```console
+ helm install schema-loader-ledger scalar-labs/schema-loading -f ./schema-loader-ledger-custom-values.yaml
+ ```
+
+1. Deploy the ScalarDL Schema Loader for Auditor.
+
+ ```console
+ helm install schema-loader-auditor scalar-labs/schema-loading -f ./schema-loader-auditor-custom-values.yaml
+ ```
+
+1. Check if the ScalarDL Schema Loader pods are deployed and completed.
+
+ ```console
+ kubectl get pod
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-auditor-0 1/1 Running 0 2m56s
+ postgresql-ledger-0 1/1 Running 0 3m1s
+ schema-loader-auditor-schema-loading-dvc5r 0/1 Completed 0 6s
+ schema-loader-ledger-schema-loading-mtllb 0/1 Completed 0 10s
+ ```
+
+   If the ScalarDL Schema Loader pods are **ContainerCreating** or **Running**, wait for the process to complete (the STATUS will become **Completed**).
+
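+1. (Optional) If a ScalarDL Schema Loader pod does not reach **Completed**, you can check its logs. Replace the pod name with the one from your own `kubectl get pod` output (the name below is taken from the example output above).
+
+   ```console
+   kubectl logs schema-loader-ledger-schema-loading-mtllb
+   ```
+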
+## Step 6. Deploy ScalarDL Ledger and Auditor on the Kubernetes cluster using Helm Charts
+
+1. Create a custom values file for ScalarDL Ledger (scalardl-ledger-custom-values.yaml).
+ * AWS Marketplace
+
+ {% raw %}
+ ```console
+ cat << 'EOF' > ~/scalardl-test/scalardl-ledger-custom-values.yaml
+ envoy:
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-ledger-envoy"
+ version: "1.3.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+
+ ledger:
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-ledger"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+ ledgerProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ scalar.dl.ledger.proof.enabled=true
+ scalar.dl.ledger.auditor.enabled=true
+ scalar.dl.ledger.proof.private_key_path=/keys/private-key
+ secretName: "ledger-credentials-secret"
+ extraVolumes:
+ - name: "ledger-keys"
+ secret:
+ secretName: "ledger-keys"
+ extraVolumeMounts:
+ - name: "ledger-keys"
+ mountPath: "/keys"
+ readOnly: true
+ EOF
+ ```
+ {% endraw %}
+
+ * Azure Marketplace
+
+ {% raw %}
+ ```console
+ cat << 'EOF' > ~/scalardl-test/scalardl-ledger-custom-values.yaml
+ envoy:
+ image:
+ repository: "/scalarinc/scalardl-envoy"
+ version: "1.3.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+
+ ledger:
+ image:
+ repository: "/scalarinc/scalar-ledger"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+ ledgerProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ scalar.dl.ledger.proof.enabled=true
+ scalar.dl.ledger.proof.private_key_path=/keys/private-key
+ secretName: "ledger-credentials-secret"
+ extraVolumes:
+ - name: "ledger-keys"
+ secret:
+ secretName: "ledger-keys"
+ extraVolumeMounts:
+ - name: "ledger-keys"
+ mountPath: "/keys"
+ readOnly: true
+ EOF
+ ```
+ {% endraw %}
+
+1. Create a custom values file for ScalarDL Auditor (scalardl-auditor-custom-values.yaml).
+ * AWS Marketplace
+
+ {% raw %}
+ ```console
+ cat << 'EOF' > ~/scalardl-test/scalardl-auditor-custom-values.yaml
+ envoy:
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-auditor-envoy"
+ version: "1.3.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+
+ auditor:
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-auditor"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+ auditorProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-auditor.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ scalar.dl.auditor.ledger.host=scalardl-ledger-envoy.default.svc.cluster.local
+ scalar.dl.auditor.cert_path=/keys/certificate
+ scalar.dl.auditor.private_key_path=/keys/private-key
+ secretName: "auditor-credentials-secret"
+ extraVolumes:
+ - name: "auditor-keys"
+ secret:
+ secretName: "auditor-keys"
+ extraVolumeMounts:
+ - name: "auditor-keys"
+ mountPath: "/keys"
+ readOnly: true
+ EOF
+ ```
+ {% endraw %}
+
+ * Azure Marketplace
+
+ {% raw %}
+ ```console
+ cat << 'EOF' > ~/scalardl-test/scalardl-auditor-custom-values.yaml
+ envoy:
+ image:
+ repository: "/scalarinc/scalardl-envoy"
+ version: "1.3.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+
+ auditor:
+ image:
+ repository: "/scalarinc/scalar-auditor"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+ auditorProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-auditor.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ scalar.dl.auditor.ledger.host=scalardl-ledger-envoy.default.svc.cluster.local
+ scalar.dl.auditor.cert_path=/keys/certificate
+ scalar.dl.auditor.private_key_path=/keys/private-key
+ secretName: "auditor-credentials-secret"
+ extraVolumes:
+ - name: "auditor-keys"
+ secret:
+ secretName: "auditor-keys"
+ extraVolumeMounts:
+ - name: "auditor-keys"
+ mountPath: "/keys"
+ readOnly: true
+ EOF
+ ```
+ {% endraw %}
+
+1. Create secret resource `ledger-keys`.
+
+ ```console
+ kubectl create secret generic ledger-keys --from-file=certificate=./certs/ledger.pem --from-file=private-key=./certs/ledger-key.pem
+ ```
+
+1. Create secret resource `auditor-keys`.
+
+ ```console
+ kubectl create secret generic auditor-keys --from-file=certificate=./certs/auditor.pem --from-file=private-key=./certs/auditor-key.pem
+ ```
+
+1. Deploy the ScalarDL Ledger.
+
+ ```console
+ helm install scalardl-ledger scalar-labs/scalardl -f ./scalardl-ledger-custom-values.yaml
+ ```
+
+1. Deploy the ScalarDL Auditor.
+
+ ```console
+ helm install scalardl-auditor scalar-labs/scalardl-audit -f ./scalardl-auditor-custom-values.yaml
+ ```
+
+1. Check if the ScalarDL Ledger and Auditor pods are deployed.
+
+ ```console
+ kubectl get pod
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-auditor-0 1/1 Running 0 14m
+ postgresql-ledger-0 1/1 Running 0 14m
+ scalardl-auditor-auditor-5b885ff4c8-fwkpf 1/1 Running 0 18s
+ scalardl-auditor-auditor-5b885ff4c8-g69cb 1/1 Running 0 18s
+ scalardl-auditor-auditor-5b885ff4c8-nsmnq 1/1 Running 0 18s
+ scalardl-auditor-envoy-689bcbdf65-5mn6v 1/1 Running 0 18s
+ scalardl-auditor-envoy-689bcbdf65-fpq8j 1/1 Running 0 18s
+ scalardl-auditor-envoy-689bcbdf65-lsz2t 1/1 Running 0 18s
+ scalardl-ledger-envoy-547bbf7546-n7p5x 1/1 Running 0 26s
+ scalardl-ledger-envoy-547bbf7546-p8nwp 1/1 Running 0 26s
+ scalardl-ledger-envoy-547bbf7546-pskpb 1/1 Running 0 26s
+ scalardl-ledger-ledger-6db5dc8774-5zsbj 1/1 Running 0 26s
+ scalardl-ledger-ledger-6db5dc8774-vnmrw 1/1 Running 0 26s
+ scalardl-ledger-ledger-6db5dc8774-wpjvs 1/1 Running 0 26s
+ schema-loader-auditor-schema-loading-dvc5r 0/1 Completed 0 11m
+ schema-loader-ledger-schema-loading-mtllb 0/1 Completed 0 11m
+ ```
+
+   If the ScalarDL Ledger and Auditor pods are deployed properly, you can see that their STATUS is **Running**.
+
+1. Check if the ScalarDL Ledger and Auditor services are deployed.
+
+ ```console
+ kubectl get svc
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ kubernetes ClusterIP 10.96.0.1 443/TCP 47d
+ postgresql-auditor ClusterIP 10.107.9.78 5432/TCP 15m
+ postgresql-auditor-hl ClusterIP None 5432/TCP 15m
+ postgresql-ledger ClusterIP 10.108.241.181 5432/TCP 15m
+ postgresql-ledger-hl ClusterIP None 5432/TCP 15m
+ scalardl-auditor-envoy ClusterIP 10.100.61.202 40051/TCP,40052/TCP 55s
+ scalardl-auditor-envoy-metrics ClusterIP 10.99.6.227 9001/TCP 55s
+ scalardl-auditor-headless ClusterIP None 40051/TCP,40053/TCP,40052/TCP 55s
+ scalardl-auditor-metrics ClusterIP 10.108.1.147 8080/TCP 55s
+ scalardl-ledger-envoy ClusterIP 10.101.191.116 50051/TCP,50052/TCP 61s
+ scalardl-ledger-envoy-metrics ClusterIP 10.106.52.103 9001/TCP 61s
+ scalardl-ledger-headless ClusterIP None 50051/TCP,50053/TCP,50052/TCP 61s
+ scalardl-ledger-metrics ClusterIP 10.99.122.106 8080/TCP 61s
+ ```
+
+ If the ScalarDL Ledger and Auditor services are deployed properly, you can see private IP addresses in the CLUSTER-IP column. (Note: `scalardl-ledger-headless` and `scalardl-auditor-headless` have no CLUSTER-IP.)
+
+## Step 7. Start a Client container
+
+We will use the certificate files in a Client container, so we create a secret resource for the client keys and mount it, together with the Ledger and Auditor keys, on the Client container.
+
+1. Create secret resource `client-keys`.
+
+ ```console
+ kubectl create secret generic client-keys --from-file=certificate=./certs/client.pem --from-file=private-key=./certs/client-key.pem
+ ```
+
+1. Start a Client container on the Kubernetes cluster.
+
+ ```console
+ cat << 'EOF' | kubectl apply -f -
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "scalardl-client"
+ spec:
+ containers:
+ - name: scalardl-client
+ image: eclipse-temurin:8
+ command: ['sleep']
+ args: ['inf']
+ volumeMounts:
+ - name: "ledger-keys"
+ mountPath: "/keys/ledger"
+ readOnly: true
+ - name: "auditor-keys"
+ mountPath: "/keys/auditor"
+ readOnly: true
+ - name: "client-keys"
+ mountPath: "/keys/client"
+ readOnly: true
+ volumes:
+ - name: "ledger-keys"
+ secret:
+ secretName: "ledger-keys"
+ - name: "auditor-keys"
+ secret:
+ secretName: "auditor-keys"
+ - name: "client-keys"
+ secret:
+ secretName: "client-keys"
+ restartPolicy: Never
+ EOF
+ ```
+
+1. Check if the Client container is running.
+
+ ```console
+ kubectl get pod scalardl-client
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ scalardl-client 1/1 Running 0 4s
+ ```
+
+## Step 8. Run ScalarDL sample contracts in the Client container
+
+The following explains the minimum steps. If you want to know more details about ScalarDL Ledger and Auditor, please refer to the following documents.
+
+ * [Getting Started with ScalarDL](https://github.com/scalar-labs/scalardl/blob/master/docs/getting-started.md)
+ * [Getting Started with ScalarDL Auditor](https://github.com/scalar-labs/scalardl/blob/master/docs/getting-started-auditor.md)
+
+When you use Auditor, you need to register the certificates of Ledger and Auditor before starting the client application. Ledger needs to register its certificate with Auditor, and Auditor needs to register its certificate with Ledger.
+
+1. Run bash in the Client container.
+
+ ```console
+ kubectl exec -it scalardl-client -- bash
+ ```
+
+ After this step, run each command in the Client container.
+
+1. Install the git, curl and unzip commands in the Client container.
+
+ ```console
+ apt update && apt install -y git curl unzip
+ ```
+
+1. Clone the ScalarDL Java Client SDK git repository.
+
+ ```console
+ git clone https://github.com/scalar-labs/scalardl-java-client-sdk.git
+ ```
+
+1. Change the directory to `scalardl-java-client-sdk/`.
+
+ ```console
+ cd scalardl-java-client-sdk/
+ ```
+
+ ```console
+ pwd
+ ```
+
+ [Command execution result]
+
+ ```console
+ /scalardl-java-client-sdk
+ ```
+
+1. Switch the branch to the version that you want to use.
+
+ ```console
+ git checkout -b v3.6.0 refs/tags/v3.6.0
+ ```
+
+ ```console
+ git branch
+ ```
+
+ [Command execution result]
+
+ ```console
+ master
+ * v3.6.0
+ ```
+
+ If you want to use another version, please specify the version (tag) you want to use. You need to use the same version of ScalarDL Ledger and ScalarDL Java Client SDK.
+
+1. Build the sample contracts.
+
+ ```console
+ ./gradlew assemble
+ ```
+
+1. Download CLI tools of ScalarDL from [ScalarDL Java Client SDK Releases](https://github.com/scalar-labs/scalardl-java-client-sdk/releases).
+
+ ```console
+ curl -OL https://github.com/scalar-labs/scalardl-java-client-sdk/releases/download/v3.6.0/scalardl-java-client-sdk-3.6.0.zip
+ ```
+
+ You need to use the same version of CLI tools and ScalarDL Ledger.
+
+1. Unzip the `scalardl-java-client-sdk-3.6.0.zip` file.
+
+ ```console
+ unzip ./scalardl-java-client-sdk-3.6.0.zip
+ ```
+
+1. Create a configuration file (ledger.as.client.properties) to register the certificate of Ledger to Auditor.
+
+ ```console
+ cat << 'EOF' > ledger.as.client.properties
+ # Ledger
+ scalar.dl.client.server.host=scalardl-ledger-envoy.default.svc.cluster.local
+
+ # Auditor
+ scalar.dl.client.auditor.enabled=true
+ scalar.dl.client.auditor.host=scalardl-auditor-envoy.default.svc.cluster.local
+
+ # Certificate
+ scalar.dl.client.cert_holder_id=ledger
+ scalar.dl.client.cert_path=/keys/ledger/certificate
+ scalar.dl.client.private_key_path=/keys/ledger/private-key
+ EOF
+ ```
+
+1. Create a configuration file (auditor.as.client.properties) to register the certificate of Auditor to Ledger.
+
+ ```console
+ cat << 'EOF' > auditor.as.client.properties
+ # Ledger
+ scalar.dl.client.server.host=scalardl-ledger-envoy.default.svc.cluster.local
+
+ # Auditor
+ scalar.dl.client.auditor.enabled=true
+ scalar.dl.client.auditor.host=scalardl-auditor-envoy.default.svc.cluster.local
+
+ # Certificate
+ scalar.dl.client.cert_holder_id=auditor
+ scalar.dl.client.cert_path=/keys/auditor/certificate
+ scalar.dl.client.private_key_path=/keys/auditor/private-key
+ EOF
+ ```
+
+1. Create a configuration file (client.properties) to access ScalarDL Ledger on the Kubernetes cluster.
+
+ ```console
+ cat << 'EOF' > client.properties
+ # Ledger
+ scalar.dl.client.server.host=scalardl-ledger-envoy.default.svc.cluster.local
+
+ # Auditor
+ scalar.dl.client.auditor.enabled=true
+ scalar.dl.client.auditor.host=scalardl-auditor-envoy.default.svc.cluster.local
+
+ # Certificate
+ scalar.dl.client.cert_holder_id=client
+ scalar.dl.client.cert_path=/keys/client/certificate
+ scalar.dl.client.private_key_path=/keys/client/private-key
+ EOF
+ ```
+
+1. Register the certificate file of Ledger.
+
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-cert --properties ./ledger.as.client.properties
+ ```
+
+1. Register the certificate file of Auditor.
+
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-cert --properties ./auditor.as.client.properties
+ ```
+
+1. Register the certificate file of the client.
+
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-cert --properties ./client.properties
+ ```
+
+1. Register the sample contract `StateUpdater`.
+
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-contract --properties ./client.properties --contract-id StateUpdater --contract-binary-name com.org1.contract.StateUpdater --contract-class-file ./build/classes/java/main/com/org1/contract/StateUpdater.class
+ ```
+
+1. Register the sample contract `StateReader`.
+
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-contract --properties ./client.properties --contract-id StateReader --contract-binary-name com.org1.contract.StateReader --contract-class-file ./build/classes/java/main/com/org1/contract/StateReader.class
+ ```
+
+1. Register the contract `ValidateLedger` to execute validation requests.
+
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-contract --properties ./client.properties --contract-id validate-ledger --contract-binary-name com.scalar.dl.client.contract.ValidateLedger --contract-class-file ./build/classes/java/main/com/scalar/dl/client/contract/ValidateLedger.class
+ ```
+
+1. Execute the contract `StateUpdater`.
+
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/execute-contract --properties ./client.properties --contract-id StateUpdater --contract-argument '{"asset_id": "test_asset", "state": 3}'
+ ```
+
+ This sample contract updates the `state` (value) of the asset named `test_asset` to `3`.
+
+1. Execute the contract `StateReader`.
+
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/execute-contract --properties ./client.properties --contract-id StateReader --contract-argument '{"asset_id": "test_asset"}'
+ ```
+
+ [Command execution result]
+
+ ```console
+ Contract result:
+ {
+ "id" : "test_asset",
+ "age" : 0,
+ "output" : {
+ "state" : 3
+ }
+ }
+ ```
+
+   * Reference information
+     * If the asset data is not tampered with, the contract execution request (`execute-contract` command) returns `OK` as a result.
+     * If the asset data is tampered with (e.g., the `state` value in the DB is tampered with), the contract execution request (`execute-contract` command) returns a value other than `OK` (e.g., `INCONSISTENT_STATES`), like the following.
+       [Command execution result (if the asset data is tampered with)]
+
+ ```console
+ {
+ "status_code" : "INCONSISTENT_STATES",
+ "error_message" : "The results from Ledger and Auditor don't match"
+ }
+ ```
+
+     * In this way, ScalarDL can detect data tampering.
+
+1. Execute a validation request for the asset.
+
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/validate-ledger --properties ./client.properties --asset-id "test_asset"
+ ```
+
+ [Command execution result]
+
+ ```console
+ {
+ "status_code" : "OK",
+ "Ledger" : {
+ "id" : "test_asset",
+ "age" : 0,
+ "nonce" : "3533427d-03cf-41d1-bf95-4d31eb0cb24d",
+ "hash" : "FiquvtPMKLlxKf4VGoccSAGsi9ptn4ozYVVTwdSzEQ0=",
+ "signature" : "MEYCIQDiiXqzw6K+Ml4uvn8rK43o5wHWESU3hoXnZPi6/OeKVwIhAM+tFBcapl6zg47Uq0Uc8nVNGWNHZLBDBGve3F0xkzTR"
+ },
+ "Auditor" : {
+ "id" : "test_asset",
+ "age" : 0,
+ "nonce" : "3533427d-03cf-41d1-bf95-4d31eb0cb24d",
+ "hash" : "FiquvtPMKLlxKf4VGoccSAGsi9ptn4ozYVVTwdSzEQ0=",
+ "signature" : "MEUCIQDLsfUR2PmxSvfpL3YvHJUkz00RDpjCdctkroZKXE8d5QIgH73FQH2e11jfnynD00Pp9DrIG1vYizxDsvxUsMPo9IU="
+ }
+ }
+ ```
+
+   * Reference information
+     * If the asset data is not tampered with, the validation request (`validate-ledger` command) returns `OK` as a result.
+     * If the asset data is tampered with (e.g., the `state` value in the DB is tampered with), the validation request (`validate-ledger` command) returns a value other than `OK` (e.g., `INCONSISTENT_STATES`), like the following.
+       [Command execution result (if the asset data is tampered with)]
+
+ ```console
+ {
+ "status_code" : "INCONSISTENT_STATES",
+ "error_message" : "The results from Ledger and Auditor don't match"
+ }
+ ```
+
+     * In this way, ScalarDL can detect data tampering.
+
+## Step 9. Delete all resources
+
+After completing the ScalarDL Ledger and ScalarDL Auditor tests on the Kubernetes cluster, remove all resources.
+
+1. Uninstall ScalarDL Ledger, ScalarDL Auditor, ScalarDL Schema Loader, and PostgreSQL.
+
+ ```console
+ helm uninstall scalardl-ledger schema-loader-ledger postgresql-ledger scalardl-auditor schema-loader-auditor postgresql-auditor
+ ```
+
+1. Remove the Client container.
+
+   ```console
+ kubectl delete pod scalardl-client --force --grace-period 0
+ ```
+
+1. Remove the working directory and sample files (configuration files, keys, and certificates).
+
+ ```console
+ cd ~
+ ```
+
+ ```console
+ rm -rf ~/scalardl-test/
+ ```
+
+## Further reading
+
+You can see how to get started with monitoring or logging for Scalar products in the following documents.
+
+* [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](./getting-started-monitoring.md)
+* [Getting Started with Helm Charts (Logging using Loki Stack)](./getting-started-logging.md)
+* [Getting Started with Helm Charts (Scalar Manager)](./getting-started-scalar-manager.md)
diff --git a/docs/3.12/helm-charts/getting-started-scalardl-ledger.md b/docs/3.12/helm-charts/getting-started-scalardl-ledger.md
new file mode 100644
index 00000000..59715b2b
--- /dev/null
+++ b/docs/3.12/helm-charts/getting-started-scalardl-ledger.md
@@ -0,0 +1,687 @@
+# Getting Started with Helm Charts (ScalarDL Ledger / Ledger only)
+
+This document explains how to get started with ScalarDL Ledger using Helm Charts on a Kubernetes cluster as a test environment. Here, we assume that you already have a Mac or Linux environment for testing. We use **minikube** in this document, but the steps we will show should work on any Kubernetes cluster.
+
+## Requirement
+
+You need to subscribe to ScalarDL Ledger in the [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-rzbuhxgvqf4d2) or [Azure Marketplace](https://azuremarketplace.microsoft.com/en/marketplace/apps/scalarinc.scalardb) to get the following container images.
+ * AWS Marketplace
+ * scalar-ledger
+ * scalar-ledger-envoy
+ * scalardl-schema-loader-ledger
+ * Azure Marketplace
+ * scalar-ledger
+ * scalardl-envoy
+ * scalardl-schema-loader
+
+Please refer to the following documents for more details.
+ * [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md)
+ * [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md)
+
+## What we create
+
+We will deploy the following components on a Kubernetes cluster.
+
+```
++--------------------------------------------------------------------------------------------------------------------------------------+
+| [Kubernetes Cluster] |
+| |
+| [Pod] [Pod] [Pod] [Pod] |
+| |
+| +-------+ +-----------------+ |
+| +---> | Envoy | ---+ +---> | ScalarDL Ledger | ---+ |
+| | +-------+ | | +-----------------+ | |
+| | | | | |
+| +--------+ +---------+ | +-------+ | +-------------------+ | +-----------------+ | +------------+ |
+| | Client | ---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | ScalarDL Ledger | ---+---> | PostgreSQL | |
+| +--------+ | (Envoy) | | +-------+ | | (ScalarDL Ledger) | | +-----------------+ | +------------+ |
+| +---------+ | | +-------------------+ | | |
+| | +-------+ | | +-----------------+ | |
+| +---> | Envoy | ---+ +---> | ScalarDL Ledger | ---+ |
+| +-------+ +-----------------+ |
+| |
++--------------------------------------------------------------------------------------------------------------------------------------+
+```
+
+## Step 1. Start a Kubernetes cluster
+
+First, you need to prepare a Kubernetes cluster. If you use a **minikube** environment, please refer to the [Getting Started with Scalar Helm Charts](./getting-started-scalar-helm-charts.md). If you have already started a Kubernetes cluster, you can skip this step.
+
+## Step 2. Start a PostgreSQL container
+
+ScalarDL Ledger uses a database system as its backend storage. In this document, we use PostgreSQL.
+
+You can deploy PostgreSQL on the Kubernetes cluster as follows.
+
+1. Add the Bitnami helm repository.
+
+ ```console
+ helm repo add bitnami https://charts.bitnami.com/bitnami
+ ```
+
+1. Deploy PostgreSQL.
+
+ ```console
+ helm install postgresql-ledger bitnami/postgresql \
+ --set auth.postgresPassword=postgres \
+ --set primary.persistence.enabled=false
+ ```
+
+1. Check if the PostgreSQL container is running.
+
+ ```console
+ kubectl get pod
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-ledger-0 1/1 Running 0 11s
+ ```
+
+## Step 3. Create a working directory
+
+We will create some configuration files and key/certificate files locally, so create a working directory for them.
+
+1. Create a working directory.
+
+ ```console
+ mkdir -p ~/scalardl-test/certs/
+ ```
+
+## Step 4. Create key/certificate files
+
+Note: In this guide, we will use self-signed certificates for testing. However, it is strongly recommended that these certificates NOT be used in production.
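+
+This guide uses the `cfssl` and `cfssljson` commands. If they are not installed yet, one way to install them is through the Go toolchain, as sketched below (this assumes you have Go available; prebuilt binaries are also available from the cfssl project):
+
+```console
+go install github.com/cloudflare/cfssl/cmd/cfssl@latest
+go install github.com/cloudflare/cfssl/cmd/cfssljson@latest
+```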
+
+1. Change the working directory to `~/scalardl-test/certs/`.
+
+ ```console
+ cd ~/scalardl-test/certs/
+ ```
+
+1. Create a JSON file that includes Ledger information.
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/certs/ledger.json
+ {
+ "CN": "ledger",
+ "hosts": ["example.com","*.example.com"],
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "O": "ledger",
+ "OU": "test team",
+ "L": "Shinjuku",
+ "ST": "Tokyo",
+ "C": "JP"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create a JSON file that includes Client information.
+
+ ```console
+ cat << 'EOF' > ~/scalardl-test/certs/client.json
+ {
+ "CN": "client",
+ "hosts": ["example.com","*.example.com"],
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "names": [
+ {
+ "O": "client",
+ "OU": "test team",
+ "L": "Shinjuku",
+ "ST": "Tokyo",
+ "C": "JP"
+ }
+ ]
+ }
+ EOF
+ ```
+
+1. Create key/certificate files for the Ledger.
+
+ ```console
+ cfssl selfsign "" ./ledger.json | cfssljson -bare ledger
+ ```
+
+1. Create key/certificate files for the Client.
+
+ ```console
+ cfssl selfsign "" ./client.json | cfssljson -bare client
+ ```
+
+1. Confirm key/certificate files are created.
+
+ ```console
+ ls -1
+ ```
+
+ [Command execution result]
+
+ ```console
+ client-key.pem
+ client.csr
+ client.json
+ client.pem
+ ledger-key.pem
+ ledger.csr
+ ledger.json
+ ledger.pem
+ ```
+
+## Step 5. Create DB schemas for ScalarDL Ledger using Helm Charts
+
+We will deploy a ScalarDL Schema Loader on the Kubernetes cluster using Helm Charts.
+The ScalarDL Schema Loader will create the DB schemas for ScalarDL Ledger in PostgreSQL.
+
+1. Change the working directory to `~/scalardl-test/`.
+
+ ```console
+ cd ~/scalardl-test/
+ ```
+
+1. Add the Scalar helm repository.
+
+ ```console
+ helm repo add scalar-labs https://scalar-labs.github.io/helm-charts
+ ```
+
+1. Create a secret resource to pull the ScalarDL container images from AWS/Azure Marketplace.
+ * AWS Marketplace
+
+ ```console
+ kubectl create secret docker-registry reg-ecr-mp-secrets \
+ --docker-server=709825985650.dkr.ecr.us-east-1.amazonaws.com \
+ --docker-username=AWS \
+ --docker-password=$(aws ecr get-login-password --region us-east-1)
+ ```
+
+ * Azure Marketplace
+
+ ```console
+ kubectl create secret docker-registry reg-acr-secrets \
+      --docker-server=<your private container registry login server> \
+      --docker-username=<Service principal ID> \
+      --docker-password=<Service principal password>
+ ```
+
+ Please refer to the following documents for more details.
+
+ * [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md)
+ * [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md)
+
+1. Create a custom values file for ScalarDL Schema Loader (`schema-loader-ledger-custom-values.yaml`).
+ * AWS Marketplace
+
+ {% raw %}
+ ```console
+ cat << 'EOF' > ~/scalardl-test/schema-loader-ledger-custom-values.yaml
+ schemaLoading:
+ schemaType: "ledger"
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-schema-loader-ledger"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ secretName: "ledger-credentials-secret"
+ EOF
+ ```
+ {% endraw %}
+ * Azure Marketplace
+
+ {% raw %}
+ ```console
+ cat << 'EOF' > ~/scalardl-test/schema-loader-ledger-custom-values.yaml
+ schemaLoading:
+ schemaType: "ledger"
+ image:
+       repository: "<your private container registry>/scalarinc/scalardl-schema-loader"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ secretName: "ledger-credentials-secret"
+ EOF
+ ```
+ {% endraw %}
+
+1. Create a secret resource that includes a username and password for PostgreSQL.
+
+ ```console
+ kubectl create secret generic ledger-credentials-secret \
+ --from-literal=SCALAR_DL_LEDGER_POSTGRES_USERNAME=postgres \
+ --from-literal=SCALAR_DL_LEDGER_POSTGRES_PASSWORD=postgres
+ ```
+
+1. Deploy the ScalarDL Schema Loader.
+
+ ```console
+ helm install schema-loader-ledger scalar-labs/schema-loading -f ./schema-loader-ledger-custom-values.yaml
+ ```
+
+1. Check if the ScalarDL Schema Loader pod is deployed and completed.
+
+ ```console
+ kubectl get pod
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-ledger-0 1/1 Running 0 11m
+ schema-loader-ledger-schema-loading-46rcr 0/1 Completed 0 3s
+ ```
+
+   If the ScalarDL Schema Loader pod is **ContainerCreating** or **Running**, wait until the process is completed (the STATUS will become **Completed**).
+
+## Step 6. Deploy ScalarDL Ledger on the Kubernetes cluster using Helm Charts
+
+1. Create a custom values file for ScalarDL Ledger (`scalardl-ledger-custom-values.yaml`).
+ * AWS Marketplace
+
+ {% raw %}
+ ```console
+ cat << 'EOF' > ~/scalardl-test/scalardl-ledger-custom-values.yaml
+ envoy:
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-ledger-envoy"
+ version: "1.3.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+
+ ledger:
+ image:
+ repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-ledger"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-ecr-mp-secrets"
+ ledgerProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ scalar.dl.ledger.proof.enabled=true
+ scalar.dl.ledger.proof.private_key_path=/keys/private-key
+ secretName: "ledger-credentials-secret"
+ extraVolumes:
+ - name: "ledger-keys"
+ secret:
+ secretName: "ledger-keys"
+ extraVolumeMounts:
+ - name: "ledger-keys"
+ mountPath: "/keys"
+ readOnly: true
+ EOF
+ ```
+ {% endraw %}
+
+ * Azure Marketplace
+
+ {% raw %}
+ ```console
+ cat << 'EOF' > ~/scalardl-test/scalardl-ledger-custom-values.yaml
+ envoy:
+ image:
+       repository: "<your private container registry>/scalarinc/scalardl-envoy"
+ version: "1.3.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+
+ ledger:
+ image:
+       repository: "<your private container registry>/scalarinc/scalar-ledger"
+ version: "3.6.0"
+ imagePullSecrets:
+ - name: "reg-acr-secrets"
+ ledgerProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ scalar.dl.ledger.proof.enabled=true
+ scalar.dl.ledger.proof.private_key_path=/keys/private-key
+ secretName: "ledger-credentials-secret"
+ extraVolumes:
+ - name: "ledger-keys"
+ secret:
+ secretName: "ledger-keys"
+ extraVolumeMounts:
+ - name: "ledger-keys"
+ mountPath: "/keys"
+ readOnly: true
+ EOF
+ ```
+ {% endraw %}
+
+1. Create secret resource `ledger-keys`.
+
+ ```console
+ kubectl create secret generic ledger-keys --from-file=private-key=./certs/ledger-key.pem
+ ```
+
+1. Deploy the ScalarDL Ledger.
+
+ ```console
+ helm install scalardl-ledger scalar-labs/scalardl -f ./scalardl-ledger-custom-values.yaml
+ ```
+
+1. Check if the ScalarDL Ledger pods are deployed.
+
+ ```console
+ kubectl get pod
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ postgresql-ledger-0 1/1 Running 0 14m
+ scalardl-ledger-envoy-547bbf7546-6cn88 1/1 Running 0 52s
+ scalardl-ledger-envoy-547bbf7546-rpg5p 1/1 Running 0 52s
+ scalardl-ledger-envoy-547bbf7546-x2vlg 1/1 Running 0 52s
+ scalardl-ledger-ledger-9bdf7f8bd-29bzm 1/1 Running 0 52s
+ scalardl-ledger-ledger-9bdf7f8bd-9fklw 1/1 Running 0 52s
+ scalardl-ledger-ledger-9bdf7f8bd-9tw5x 1/1 Running 0 52s
+ schema-loader-ledger-schema-loading-46rcr 0/1 Completed 0 3m38s
+ ```
+
+   If the ScalarDL Ledger pods are deployed properly, you can see that their STATUS is **Running**.
+
+1. Check if the ScalarDL Ledger services are deployed.
+
+ ```console
+ kubectl get svc
+ ```
+
+ [Command execution result]
+
+ ```console
+   NAME                            TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                         AGE
+   kubernetes                      ClusterIP   10.96.0.1        <none>        443/TCP                         47d
+   postgresql-ledger               ClusterIP   10.109.253.150   <none>        5432/TCP                        15m
+   postgresql-ledger-hl            ClusterIP   None             <none>        5432/TCP                        15m
+   scalardl-ledger-envoy           ClusterIP   10.106.141.153   <none>        50051/TCP,50052/TCP             83s
+   scalardl-ledger-envoy-metrics   ClusterIP   10.108.36.136    <none>        9001/TCP                        83s
+   scalardl-ledger-headless        ClusterIP   None             <none>        50051/TCP,50053/TCP,50052/TCP   83s
+   scalardl-ledger-metrics         ClusterIP   10.98.4.217      <none>        8080/TCP                        83s
+ ```
+
+ If the ScalarDL Ledger services are deployed properly, you can see private IP addresses in the CLUSTER-IP column. (Note: `scalardl-ledger-headless` has no CLUSTER-IP.)
+
+## Step 7. Start a Client container
+
+We will use the certificate files in a Client container, so we create a secret resource and mount it on the Client container.
+
+1. Create secret resource `client-keys`.
+
+ ```console
+ kubectl create secret generic client-keys --from-file=certificate=./certs/client.pem --from-file=private-key=./certs/client-key.pem
+ ```
+
+1. Start a Client container on the Kubernetes cluster.
+
+ ```console
+ cat << 'EOF' | kubectl apply -f -
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "scalardl-client"
+ spec:
+ containers:
+ - name: scalardl-client
+ image: eclipse-temurin:8
+ command: ['sleep']
+ args: ['inf']
+ volumeMounts:
+ - name: "client-keys"
+ mountPath: "/keys"
+ readOnly: true
+ volumes:
+ - name: "client-keys"
+ secret:
+ secretName: "client-keys"
+ restartPolicy: Never
+ EOF
+ ```
+
+1. Check if the Client container is running.
+
+ ```console
+ kubectl get pod scalardl-client
+ ```
+
+ [Command execution result]
+
+ ```console
+ NAME READY STATUS RESTARTS AGE
+ scalardl-client 1/1 Running 0 11s
+ ```
+
+## Step 8. Run ScalarDL sample contracts in the Client container
+
+The following explains the minimum steps. If you want to know more details about ScalarDL and the contract, please refer to the [Getting Started with ScalarDL](https://github.com/scalar-labs/scalardl/blob/master/docs/getting-started.md).
+
+1. Run bash in the Client container.
+
+ ```console
+ kubectl exec -it scalardl-client -- bash
+ ```
+
+ After this step, run each command in the Client container.
+
+1. Install the git, curl, and unzip commands in the Client container.
+
+ ```console
+ apt update && apt install -y git curl unzip
+ ```
+
+1. Clone the ScalarDL Java Client SDK git repository.
+
+ ```console
+ git clone https://github.com/scalar-labs/scalardl-java-client-sdk.git
+ ```
+
+1. Change the directory to `scalardl-java-client-sdk/`.
+
+ ```console
+ cd scalardl-java-client-sdk/
+ ```
+
+ ```console
+ pwd
+ ```
+
+ [Command execution result]
+
+ ```console
+ /scalardl-java-client-sdk
+ ```
+
+1. Check out the version (tag) you want to use.
+
+ ```console
+ git checkout -b v3.6.0 refs/tags/v3.6.0
+ ```
+
+ ```console
+ git branch
+ ```
+
+   [Command execution result]
+
+ ```console
+ master
+ * v3.6.0
+ ```
+
+   If you want to use another version, specify the version (tag) you want to use. You must use the same version of the ScalarDL Java Client SDK as ScalarDL Ledger.
+
+1. Build the sample contracts.
+
+ ```console
+ ./gradlew assemble
+ ```
+
+1. Download the ScalarDL CLI tools from [ScalarDL Java Client SDK Releases](https://github.com/scalar-labs/scalardl-java-client-sdk/releases).
+
+ ```console
+ curl -OL https://github.com/scalar-labs/scalardl-java-client-sdk/releases/download/v3.6.0/scalardl-java-client-sdk-3.6.0.zip
+ ```
+
+   You must use the same version of the CLI tools as ScalarDL Ledger.
+
+1. Unzip the `scalardl-java-client-sdk-3.6.0.zip` file.
+
+ ```console
+ unzip ./scalardl-java-client-sdk-3.6.0.zip
+ ```
+
+1. Create a configuration file (`client.properties`) to access ScalarDL Ledger on the Kubernetes cluster.
+
+ ```console
+ cat << 'EOF' > client.properties
+ scalar.dl.client.server.host=scalardl-ledger-envoy.default.svc.cluster.local
+ scalar.dl.client.cert_holder_id=client
+ scalar.dl.client.cert_path=/keys/certificate
+ scalar.dl.client.private_key_path=/keys/private-key
+ EOF
+ ```
+
+1. Register the certificate file of the client.
+
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-cert --properties ./client.properties
+ ```
+
+1. Register the sample contract `StateUpdater`.
+
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-contract --properties ./client.properties --contract-id StateUpdater --contract-binary-name com.org1.contract.StateUpdater --contract-class-file ./build/classes/java/main/com/org1/contract/StateUpdater.class
+ ```
+
+1. Register the sample contract `StateReader`.
+
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/register-contract --properties ./client.properties --contract-id StateReader --contract-binary-name com.org1.contract.StateReader --contract-class-file ./build/classes/java/main/com/org1/contract/StateReader.class
+ ```
+
+1. Execute the contract `StateUpdater`.
+
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/execute-contract --properties ./client.properties --contract-id StateUpdater --contract-argument '{"asset_id": "test_asset", "state": 3}'
+ ```
+
+   This sample contract updates the `state` (value) of the asset named `test_asset` to `3`.
+
+1. Execute the contract `StateReader`.
+
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/execute-contract --properties ./client.properties --contract-id StateReader --contract-argument '{"asset_id": "test_asset"}'
+ ```
+
+ [Command execution result]
+
+ ```console
+ Contract result:
+ {
+ "id" : "test_asset",
+ "age" : 0,
+ "output" : {
+ "state" : 3
+ }
+ }
+ ```
+
+1. Execute a validation request for the asset.
+
+ ```console
+ ./scalardl-java-client-sdk-3.6.0/bin/validate-ledger --properties ./client.properties --asset-id "test_asset"
+ ```
+
+ [Command execution result]
+
+ ```console
+ {
+ "status_code" : "OK",
+ "Ledger" : {
+ "id" : "test_asset",
+ "age" : 0,
+ "nonce" : "f31599c6-e6b9-4b77-adc3-61cb5f119bd3",
+ "hash" : "9ExfFl5Lg9IQwdXdW9b87Bi+PWccn3OSNRbhmI/dboo=",
+ "signature" : "MEQCIG6Xa4WOWGMIIbA3PnCje4aAapYfCMerF54xRW0gaUuzAiBCA1nCAPoFWgxArB34/u9b+KeoxQBMALI/pOzMNoLExg=="
+ },
+ "Auditor" : null
+ }
+ ```
+
+   * Reference information
+     * If the asset data is not tampered with, the validation request (`validate-ledger` command) returns `OK` as a result.
+     * If the asset data is tampered with (e.g., the `state` value in the DB is tampered with), the validation request (`validate-ledger` command) returns a value other than `OK` (e.g., `INVALID_OUTPUT`), like the following.
+       [Command execution result (if the asset data is tampered with)]
+
+ ```console
+ {
+ "status_code" : "INVALID_OUTPUT",
+ "Ledger" : {
+ "id" : "test_asset",
+ "age" : 0,
+ "nonce" : "f31599c6-e6b9-4b77-adc3-61cb5f119bd3",
+ "hash" : "9ExfFl5Lg9IQwdXdW9b87Bi+PWccn3OSNRbhmI/dboo=",
+ "signature" : "MEQCIGtJerW7N93c/bvIBy/7NXxoQwGFznHMmV6RzsgHQg0dAiBu+eBxkfmMQKJY2d9fLNvCH+4b+9rl7gZ3OXJ2NYeVsA=="
+ },
+ "Auditor" : null
+ }
+ ```
+
+     * In this way, ScalarDL Ledger can detect data tampering.
+
+## Step 9. Delete all resources
+
+After completing the ScalarDL Ledger tests on the Kubernetes cluster, remove all resources.
+
+1. Uninstall ScalarDL Ledger, ScalarDL Schema Loader, and PostgreSQL.
+
+ ```console
+ helm uninstall scalardl-ledger schema-loader-ledger postgresql-ledger
+ ```
+
+1. Remove the Client container.
+
+   ```console
+ kubectl delete pod scalardl-client --force --grace-period 0
+ ```
+
+1. Remove the working directory and sample files (configuration files, keys, and certificates).
+
+ ```console
+ cd ~
+ ```
+
+ ```console
+ rm -rf ~/scalardl-test/
+ ```
+
+## Further reading
+
+You can see how to get started with monitoring or logging for Scalar products in the following documents.
+
+* [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](./getting-started-monitoring.md)
+* [Getting Started with Helm Charts (Logging using Loki Stack)](./getting-started-logging.md)
+* [Getting Started with Helm Charts (Scalar Manager)](./getting-started-scalar-manager.md)
diff --git a/docs/3.12/helm-charts/how-to-deploy-scalar-admin-for-kubernetes.md b/docs/3.12/helm-charts/how-to-deploy-scalar-admin-for-kubernetes.md
new file mode 100644
index 00000000..20b9d06d
--- /dev/null
+++ b/docs/3.12/helm-charts/how-to-deploy-scalar-admin-for-kubernetes.md
@@ -0,0 +1,27 @@
+# How to deploy Scalar Admin for Kubernetes
+
+This document explains how to deploy Scalar Admin for Kubernetes by using Scalar Helm Charts. For details on the custom values file for Scalar Admin for Kubernetes, see [Configure a custom values file for Scalar Admin for Kubernetes](./configure-custom-values-scalar-admin-for-kubernetes.md).
+
+## Deploy Scalar Admin for Kubernetes
+
+To deploy Scalar Admin for Kubernetes, run the following command, replacing the contents in the angle brackets as described:
+
+```console
+helm install <RELEASE_NAME> scalar-labs/scalar-admin-for-kubernetes -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
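+
+For example, a hypothetical invocation might look like the following (the release name, namespace, file path, and chart version are illustrative values):
+
+```console
+helm install scalar-admin-pause scalar-labs/scalar-admin-for-kubernetes -n scalar-admin -f /tmp/scalar-admin-for-kubernetes-custom-values.yaml --version 1.0.0
+```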
+
+## Upgrade a Scalar Admin for Kubernetes job
+
+To upgrade a Scalar Admin for Kubernetes job, run the following command, replacing the contents in the angle brackets as described:
+
+```console
+helm upgrade <RELEASE_NAME> scalar-labs/scalar-admin-for-kubernetes -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Delete a Scalar Admin for Kubernetes job
+
+To delete a Scalar Admin for Kubernetes job, run the following command, replacing the contents in the angle brackets as described:
+
+```console
+helm uninstall <RELEASE_NAME> -n <NAMESPACE>
+```
diff --git a/docs/3.12/helm-charts/how-to-deploy-scalar-manager.md b/docs/3.12/helm-charts/how-to-deploy-scalar-manager.md
new file mode 100644
index 00000000..9e592d40
--- /dev/null
+++ b/docs/3.12/helm-charts/how-to-deploy-scalar-manager.md
@@ -0,0 +1,46 @@
+# How to deploy Scalar Manager
+
+This document explains how to deploy Scalar Manager using Scalar Helm Charts. You must prepare your custom values file. Please refer to the following document for more details on the custom values file for Scalar Manager.
+
+* [Configure a custom values file for Scalar Manager](./configure-custom-values-scalar-manager.md)
+
+## Deploy kube-prometheus-stack and loki-stack
+
+When you use Scalar Manager, you must deploy kube-prometheus-stack and loki-stack. Please refer to the following documents for more details on how to deploy them.
+
+* [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](https://github.com/scalar-labs/helm-charts/blob/main/docs/getting-started-monitoring.md)
+* [Getting Started with Helm Charts (Logging using Loki Stack)](https://github.com/scalar-labs/helm-charts/blob/main/docs/getting-started-logging.md)
+
+When you deploy kube-prometheus-stack, you must set the following configuration in the custom values file for kube-prometheus-stack.
+
+```yaml
+grafana:
+  grafana.ini:
+ security:
+ allow_embedding: true
+ cookie_samesite: disabled
+```
+
+If you already have a deployment of kube-prometheus-stack, please upgrade the configuration using the following command.
+
+```console
+helm upgrade <RELEASE_NAME> prometheus-community/kube-prometheus-stack -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Deploy Scalar Manager
+
+```console
+helm install <RELEASE_NAME> scalar-labs/scalar-manager -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
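+
+For example, a hypothetical invocation might look like the following (all values are illustrative):
+
+```console
+helm install scalar-manager scalar-labs/scalar-manager -n monitoring -f /tmp/scalar-manager-custom-values.yaml --version 2.0.0
+```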
+
+## Upgrade the deployment of Scalar Manager
+
+```console
+helm upgrade <RELEASE_NAME> scalar-labs/scalar-manager -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Delete the deployment of Scalar Manager
+
+```console
+helm uninstall <RELEASE_NAME> -n <NAMESPACE>
+```
diff --git a/docs/3.12/helm-charts/how-to-deploy-scalar-products.md b/docs/3.12/helm-charts/how-to-deploy-scalar-products.md
new file mode 100644
index 00000000..ba50c2a6
--- /dev/null
+++ b/docs/3.12/helm-charts/how-to-deploy-scalar-products.md
@@ -0,0 +1,60 @@
+# Deploy Scalar products using Scalar Helm Charts
+
+This document explains how to deploy Scalar products using Scalar Helm Charts. If you want to test Scalar products on your local environment using a minikube cluster, please refer to the following getting started guide.
+
+* [Getting Started with Scalar Helm Charts](./getting-started-scalar-helm-charts.md)
+
+## Prerequisites
+
+### Install the helm command
+
+You must install the helm command to use Scalar Helm Charts. Please install the helm command according to the [Helm document](https://helm.sh/docs/intro/install/).
+
+### Add the Scalar Helm Charts repository
+
+```console
+helm repo add scalar-labs https://scalar-labs.github.io/helm-charts
+```
+
+```console
+helm repo update scalar-labs
+```
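+
+You can verify that the repository has been added by searching it for the Scalar charts:
+
+```console
+helm search repo scalar-labs
+```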
+
+### Prepare a Kubernetes cluster
+
+You must prepare a Kubernetes cluster for the deployment of Scalar products. If you use EKS (Amazon Elastic Kubernetes Service) or AKS (Azure Kubernetes Service) in a production environment, please refer to the following document for more details.
+
+* [scalar-labs/scalar-kubernetes](https://github.com/scalar-labs/scalar-kubernetes/blob/master/README.md)
+
+You must prepare a supported version of Kubernetes. For versions that Scalar Helm Charts supports, see [Supported Kubernetes versions](https://github.com/scalar-labs/helm-charts#supported-kubernetes-versions).
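+
+You can check the Kubernetes version of your cluster with the following command:
+
+```console
+kubectl version
+```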
+
+### Prepare a database (ScalarDB, ScalarDL Ledger, ScalarDL Auditor)
+
+You must prepare a database as the backend storage for ScalarDB/ScalarDL. You can see the databases that ScalarDB/ScalarDL supports in the following document.
+
+* [ScalarDB Supported Databases](https://github.com/scalar-labs/scalardb/blob/master/docs/scalardb-supported-databases.md)
+
+### Prepare a custom values file
+
+You must prepare your custom values file based on your environment. Please refer to the following documents for more details on how to create a custom values file.
+
+* [Configure a custom values file for Scalar Helm Charts](./configure-custom-values-file.md)
+
+### Create a Secret resource for authentication of the container registry (Optional)
+
+If you use a Kubernetes cluster other than EKS or AKS, you need to create a Secret resource that includes the credentials and set the Secret name to `imagePullSecrets[].name` in your custom values file. Please refer to the following documents for more details on creating the Secret resource and setting it in your custom values file.
+
+* [Deploy containers on Kubernetes other than EKS from AWS Marketplace using Scalar Helm Charts](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md#byol-deploy-containers-on-kubernetes-other-than-eks-from-aws-marketplace-using-scalar-helm-charts)
+* [Deploy containers on Kubernetes other than AKS (Azure Kubernetes Service) from your private container registry using Scalar Helm Charts](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md#deploy-containers-on-kubernetes-other-than-aks-azure-kubernetes-service-from-your-private-container-registry-using-scalar-helm-charts)
+
+## Deploy Scalar products
+
+Please refer to the following documents for more details on how to deploy each product.
+
+* [ScalarDB Cluster](./how-to-deploy-scalardb-cluster.md)
+* [ScalarDB Analytics with PostgreSQL](./how-to-deploy-scalardb-analytics-postgresql.md)
+* [ScalarDL Ledger](./how-to-deploy-scalardl-ledger.md)
+* [ScalarDL Auditor](./how-to-deploy-scalardl-auditor.md)
+* [Scalar Admin for Kubernetes](./how-to-deploy-scalar-admin-for-kubernetes.md)
+* [Scalar Manager](./how-to-deploy-scalar-manager.md)
+* [[Deprecated] ScalarDB Server](./how-to-deploy-scalardb.md)
+* [[Deprecated] ScalarDB GraphQL](./how-to-deploy-scalardb-graphql.md)
diff --git a/docs/3.12/helm-charts/how-to-deploy-scalardb-analytics-postgresql.md b/docs/3.12/helm-charts/how-to-deploy-scalardb-analytics-postgresql.md
new file mode 100644
index 00000000..b2454de4
--- /dev/null
+++ b/docs/3.12/helm-charts/how-to-deploy-scalardb-analytics-postgresql.md
@@ -0,0 +1,35 @@
+# How to deploy ScalarDB Analytics with PostgreSQL
+
+This document explains how to deploy ScalarDB Analytics with PostgreSQL by using Scalar Helm Charts. For details on the custom values file for ScalarDB Analytics with PostgreSQL, see [Configure a custom values file for ScalarDB Analytics with PostgreSQL](./configure-custom-values-scalardb-analytics-postgresql.md).
+
+## Prepare a secret resource
+
+Before you deploy ScalarDB Analytics with PostgreSQL, you must create a secret resource named `scalardb-analytics-postgresql-superuser-password` with the key `superuser-password` that contains the superuser password for PostgreSQL. The Scalar Helm Chart mounts this secret resource and sets the `POSTGRES_PASSWORD` environment variable to the value of the `superuser-password` key.
+
+```console
+kubectl create secret generic scalardb-analytics-postgresql-superuser-password --from-literal=superuser-password=<SUPERUSER_PASSWORD> -n <NAMESPACE>
+```
+
+## Deploy ScalarDB Analytics with PostgreSQL
+
+To deploy ScalarDB Analytics with PostgreSQL, run the following command, replacing the contents in the angle brackets as described:
+
+```console
+helm install <RELEASE_NAME> scalar-labs/scalardb-analytics-postgresql -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
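+
+For example, a hypothetical invocation might look like the following (all values are illustrative):
+
+```console
+helm install scalardb-analytics-postgresql scalar-labs/scalardb-analytics-postgresql -n default -f /tmp/scalardb-analytics-postgresql-custom-values.yaml --version 1.0.0
+```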
+
+## Upgrade a ScalarDB Analytics with PostgreSQL deployment
+
+To upgrade a ScalarDB Analytics with PostgreSQL deployment, run the following command, replacing the contents in the angle brackets as described:
+
+```console
+helm upgrade <RELEASE_NAME> scalar-labs/scalardb-analytics-postgresql -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Delete a ScalarDB Analytics with PostgreSQL deployment
+
+To delete a ScalarDB Analytics with PostgreSQL deployment, run the following command, replacing the contents in the angle brackets as described:
+
+```console
+helm uninstall <RELEASE_NAME> -n <NAMESPACE>
+```
diff --git a/docs/3.12/helm-charts/how-to-deploy-scalardb-cluster.md b/docs/3.12/helm-charts/how-to-deploy-scalardb-cluster.md
new file mode 100644
index 00000000..c45987a0
--- /dev/null
+++ b/docs/3.12/helm-charts/how-to-deploy-scalardb-cluster.md
@@ -0,0 +1,72 @@
+# How to deploy ScalarDB Cluster
+
+This document explains how to deploy ScalarDB Cluster by using Scalar Helm Charts. For details on the custom values file for ScalarDB Cluster, see [Configure a custom values file for ScalarDB Cluster](./configure-custom-values-scalardb-cluster.md).
+
+## Deploy ScalarDB Cluster
+
+```console
+helm install <RELEASE_NAME> scalar-labs/scalardb-cluster -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
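+
+For example, a hypothetical invocation might look like the following (the release name, namespace, file path, and chart version are illustrative values):
+
+```console
+helm install scalardb-cluster scalar-labs/scalardb-cluster -n default -f /tmp/scalardb-cluster-custom-values.yaml --version 1.0.0
+```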
+
+## Upgrade a ScalarDB Cluster deployment
+
+```console
+helm upgrade <RELEASE_NAME> scalar-labs/scalardb-cluster -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Delete a ScalarDB Cluster deployment
+
+```console
+helm uninstall <RELEASE_NAME> -n <NAMESPACE>
+```
+
+## Deploy your client application on Kubernetes with `direct-kubernetes` mode
+
+If you use ScalarDB Cluster with `direct-kubernetes` mode, you must:
+
+1. Deploy your application pods on the same Kubernetes cluster as ScalarDB Cluster.
+2. Create three Kubernetes resources (`Role`, `RoleBinding`, and `ServiceAccount`).
+3. Mount the `ServiceAccount` on your application pods (see the example after the resource definitions below).
+
+This method is necessary because the ScalarDB Cluster client library in `direct-kubernetes` mode calls the Kubernetes API from inside your application pods to get information about the ScalarDB Cluster pods.
+
+* Role
+
+ ```yaml
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: Role
+ metadata:
+ name: scalardb-cluster-client-role
+    namespace: <YOUR_NAMESPACE>
+ rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get", "watch", "list"]
+ ```
+
+* RoleBinding
+
+ ```yaml
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: scalardb-cluster-client-rolebinding
+    namespace: <YOUR_NAMESPACE>
+ subjects:
+ - kind: ServiceAccount
+ name: scalardb-cluster-client-sa
+ roleRef:
+ kind: Role
+    name: scalardb-cluster-client-role
+ apiGroup: rbac.authorization.k8s.io
+ ```
+
+* ServiceAccount
+
+ ```yaml
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: scalardb-cluster-client-sa
+    namespace: <YOUR_NAMESPACE>
+ ```
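+
+After creating these resources, you can mount the `ServiceAccount` on your application pods by specifying `serviceAccountName` in the pod spec. The following is a minimal sketch; the pod name and container image are placeholders:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: scalardb-cluster-client
+  namespace: <YOUR_NAMESPACE>
+spec:
+  serviceAccountName: scalardb-cluster-client-sa
+  containers:
+    - name: app
+      image: <YOUR_APPLICATION_IMAGE>
+```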
diff --git a/docs/3.12/helm-charts/how-to-deploy-scalardb-graphql.md b/docs/3.12/helm-charts/how-to-deploy-scalardb-graphql.md
new file mode 100644
index 00000000..89db3cae
--- /dev/null
+++ b/docs/3.12/helm-charts/how-to-deploy-scalardb-graphql.md
@@ -0,0 +1,41 @@
+# [Deprecated] How to deploy ScalarDB GraphQL
+
+{% capture notice--info %}
+**Note**
+
+ScalarDB GraphQL Server is now deprecated. Please use [ScalarDB Cluster](./how-to-deploy-scalardb-cluster.md) instead.
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+This document explains how to deploy ScalarDB GraphQL using Scalar Helm Charts. You must prepare your custom values file. Please refer to the following document for more details on the custom values file for ScalarDB GraphQL.
+
+* [[Deprecated] Configure a custom values file for ScalarDB GraphQL](./configure-custom-values-scalardb-graphql.md)
+
+## Deploy ScalarDB Server (recommended option)
+
+When you deploy ScalarDB GraphQL, it is recommended to deploy ScalarDB Server between ScalarDB GraphQL and backend databases as follows.
+
+```
+[Client] ---> [ScalarDB GraphQL] ---> [ScalarDB Server] ---> [Backend databases]
+```
+
+Please deploy ScalarDB Server before you deploy ScalarDB GraphQL according to the document [How to deploy ScalarDB Server](./how-to-deploy-scalardb.md).
+
+## Deploy ScalarDB GraphQL
+
+```console
+helm install <RELEASE_NAME> scalar-labs/scalardb-graphql -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Upgrade the deployment of ScalarDB GraphQL
+
+```console
+helm upgrade <RELEASE_NAME> scalar-labs/scalardb-graphql -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Delete the deployment of ScalarDB GraphQL
+
+```console
+helm uninstall <RELEASE_NAME> -n <NAMESPACE>
+```
diff --git a/docs/3.12/helm-charts/how-to-deploy-scalardb.md b/docs/3.12/helm-charts/how-to-deploy-scalardb.md
new file mode 100644
index 00000000..8482477d
--- /dev/null
+++ b/docs/3.12/helm-charts/how-to-deploy-scalardb.md
@@ -0,0 +1,31 @@
+# [Deprecated] How to deploy ScalarDB Server
+
+{% capture notice--info %}
+**Note**
+
+ScalarDB Server is now deprecated. Please use [ScalarDB Cluster](./how-to-deploy-scalardb-cluster.md) instead.
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+This document explains how to deploy ScalarDB Server using Scalar Helm Charts. You must prepare your custom values file. Please refer to the following document for more details on the custom values file for ScalarDB Server.
+
+* [[Deprecated] Configure a custom values file for ScalarDB Server](./configure-custom-values-scalardb.md)
+
+## Deploy ScalarDB Server
+
+```console
+helm install <RELEASE_NAME> scalar-labs/scalardb -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Upgrade the deployment of ScalarDB Server
+
+```console
+helm upgrade <RELEASE_NAME> scalar-labs/scalardb -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Delete the deployment of ScalarDB Server
+
+```console
+helm uninstall <RELEASE_NAME> -n <NAMESPACE>
+```
diff --git a/docs/3.12/helm-charts/how-to-deploy-scalardl-auditor.md b/docs/3.12/helm-charts/how-to-deploy-scalardl-auditor.md
new file mode 100644
index 00000000..085d0f09
--- /dev/null
+++ b/docs/3.12/helm-charts/how-to-deploy-scalardl-auditor.md
@@ -0,0 +1,38 @@
+# How to deploy ScalarDL Auditor
+
+This document explains how to deploy ScalarDL Auditor using Scalar Helm Charts. You must prepare your custom values file. Please refer to the following documents for more details on the custom values files for ScalarDL Auditor and ScalarDL Schema Loader.
+
+* [Configure a custom values file for ScalarDL Auditor](./configure-custom-values-scalardl-auditor.md)
+* [Configure a custom values file for ScalarDL Schema Loader](./configure-custom-values-scalardl-schema-loader.md)
+
+## Prepare a private key file and a certificate file
+
+When you deploy ScalarDL Auditor, you must create a Secret resource to mount the private key file and the certificate file on the ScalarDL Auditor pods.
+
+For more details on how to mount the key and certificate files on the ScalarDL pods, refer to [Mount key and certificate files on a pod in ScalarDL Helm Charts](./mount-files-or-volumes-on-scalar-pods.md#mount-key-and-certificate-files-on-a-pod-in-scalardl-helm-charts).
+
+## Create schemas for ScalarDL Auditor (Deploy ScalarDL Schema Loader)
+
+Before you deploy ScalarDL Auditor, you must create schemas for ScalarDL Auditor on the backend database.
+
+```console
+helm install <RELEASE_NAME> scalar-labs/schema-loading -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Deploy ScalarDL Auditor
+
+```console
+helm install <RELEASE_NAME> scalar-labs/scalardl-audit -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
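+
+For example, a hypothetical invocation might look like the following (all values are illustrative):
+
+```console
+helm install scalardl-auditor scalar-labs/scalardl-audit -n default -f /tmp/scalardl-auditor-custom-values.yaml --version 4.5.0
+```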
+
+## Upgrade the deployment of ScalarDL Auditor
+
+```console
+helm upgrade <RELEASE_NAME> scalar-labs/scalardl-audit -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Delete the deployment of ScalarDL Auditor and ScalarDL Schema Loader
+
+```console
+helm uninstall <RELEASE_NAME> -n <NAMESPACE>
+```
diff --git a/docs/3.12/helm-charts/how-to-deploy-scalardl-ledger.md b/docs/3.12/helm-charts/how-to-deploy-scalardl-ledger.md
new file mode 100644
index 00000000..0ae71db7
--- /dev/null
+++ b/docs/3.12/helm-charts/how-to-deploy-scalardl-ledger.md
@@ -0,0 +1,40 @@
+# How to deploy ScalarDL Ledger
+
+This document explains how to deploy ScalarDL Ledger using Scalar Helm Charts. You must prepare your custom values file. Please refer to the following documents for more details on the custom values files for ScalarDL Ledger and ScalarDL Schema Loader.
+
+* [Configure a custom values file for ScalarDL Ledger](./configure-custom-values-scalardl-ledger.md)
+* [Configure a custom values file for ScalarDL Schema Loader](./configure-custom-values-scalardl-schema-loader.md)
+
+## Prepare a private key file (optional; required if you use ScalarDL Auditor)
+
+If you use the [asset proofs](https://github.com/scalar-labs/scalardl/blob/master/docs/how-to-use-proof.md) feature of ScalarDL Ledger, you must create a Secret resource to mount the private key file on the ScalarDL Ledger pods. If you use ScalarDL Auditor, asset proofs are required.
+
+Please refer to the following document for more details on how to mount the key/certificate files on the ScalarDL pods.
+
+* [Mount key and certificate files on a pod in ScalarDL Helm Charts](./mount-files-or-volumes-on-scalar-pods.md#mount-key-and-certificate-files-on-a-pod-in-scalardl-helm-charts)
+
+## Create schemas for ScalarDL Ledger (Deploy ScalarDL Schema Loader)
+
+Before you deploy ScalarDL Ledger, you must create schemas for ScalarDL Ledger on the backend database.
+
+```console
+helm install <RELEASE_NAME> scalar-labs/schema-loading -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Deploy ScalarDL Ledger
+
+```console
+helm install <RELEASE_NAME> scalar-labs/scalardl -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
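+
+For example, a hypothetical invocation might look like the following (all values are illustrative):
+
+```console
+helm install scalardl-ledger scalar-labs/scalardl -n default -f /tmp/scalardl-ledger-custom-values.yaml --version 4.5.0
+```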
+
+## Upgrade the deployment of ScalarDL Ledger
+
+```console
+helm upgrade <RELEASE_NAME> scalar-labs/scalardl -n <NAMESPACE> -f /<PATH_TO_FILE>/<CUSTOM_VALUES_FILE> --version <CHART_VERSION>
+```
+
+## Delete the deployment of ScalarDL Ledger and ScalarDL Schema Loader
+
+```console
+helm uninstall <RELEASE_NAME> -n <NAMESPACE>
+```
diff --git a/docs/3.12/helm-charts/mount-files-or-volumes-on-scalar-pods.md b/docs/3.12/helm-charts/mount-files-or-volumes-on-scalar-pods.md
new file mode 100644
index 00000000..a89610ca
--- /dev/null
+++ b/docs/3.12/helm-charts/mount-files-or-volumes-on-scalar-pods.md
@@ -0,0 +1,133 @@
+# Mount any files or volumes on Scalar product pods
+
+You can mount any files or volumes on Scalar product pods when you use ScalarDB Server, ScalarDB Cluster, ScalarDB Analytics with PostgreSQL, or ScalarDL Helm Charts (ScalarDL Ledger and ScalarDL Auditor).
+
+## Mount key and certificate files on a pod in ScalarDL Helm Charts
+
+You must mount the key and certificate files to run ScalarDL Auditor.
+
+* Configuration example
+ * ScalarDL Ledger
+
+ ```yaml
+ ledger:
+ ledgerProperties: |
+ ...
+ scalar.dl.ledger.proof.enabled=true
+ scalar.dl.ledger.auditor.enabled=true
+ scalar.dl.ledger.proof.private_key_path=/keys/private-key
+ ```
+
+ * ScalarDL Auditor
+
+ ```yaml
+ auditor:
+ auditorProperties: |
+ ...
+ scalar.dl.auditor.private_key_path=/keys/private-key
+ scalar.dl.auditor.cert_path=/keys/certificate
+ ```
+
+In this example, you need to mount files named `private-key` and `certificate` under the `/keys` directory in the container. You can use `extraVolumes` and `extraVolumeMounts` to mount these files.
+
+1. Set `extraVolumes` and `extraVolumeMounts` in the custom values file using the same syntax as a Kubernetes manifest. You need to specify the directory name in the `mountPath` key.
+ * Example
+ * ScalarDL Ledger
+
+ ```yaml
+ ledger:
+ extraVolumes:
+ - name: ledger-keys
+ secret:
+ secretName: ledger-keys
+ extraVolumeMounts:
+ - name: ledger-keys
+ mountPath: /keys
+ readOnly: true
+ ```
+
+ * ScalarDL Auditor
+
+ ```yaml
+ auditor:
+ extraVolumes:
+ - name: auditor-keys
+ secret:
+ secretName: auditor-keys
+ extraVolumeMounts:
+ - name: auditor-keys
+ mountPath: /keys
+ readOnly: true
+ ```
+
+1. Create a `Secret` resource that includes key and certificate files.
+
+   You need to specify the file names as the keys of the `Secret`.
+
+ * Example
+ * ScalarDL Ledger
+
+ ```console
+ kubectl create secret generic ledger-keys \
+ --from-file=private-key=./ledger-key.pem
+ ```
+
+ * ScalarDL Auditor
+
+ ```console
+ kubectl create secret generic auditor-keys \
+ --from-file=private-key=./auditor-key.pem \
+ --from-file=certificate=./auditor-cert.pem
+ ```
+
+1. Deploy Scalar products with the above custom values file.
+
+ After deploying Scalar products, key and certificate files are mounted under the `/keys` directory as follows.
+
+ * Example
+ * ScalarDL Ledger
+
+ ```console
+ $ ls -l /keys/
+ total 0
+ lrwxrwxrwx 1 root root 18 Jun 27 03:12 private-key -> ..data/private-key
+ ```
+
+ * ScalarDL Auditor
+
+ ```console
+ $ ls -l /keys/
+ total 0
+ lrwxrwxrwx 1 root root 18 Jun 27 03:16 certificate -> ..data/certificate
+ lrwxrwxrwx 1 root root 18 Jun 27 03:16 private-key -> ..data/private-key
+ ```
+
+## Mount emptyDir to get a heap dump file
+
+You can mount an `emptyDir` volume on Scalar product pods by using the following keys in your custom values file. For example, you can use this volume to get a heap dump of Scalar products.
+
+* Keys
+ * `scalardb.extraVolumes` / `scalardb.extraVolumeMounts` (ScalarDB Server)
+ * `scalardbCluster.extraVolumes` / `scalardbCluster.extraVolumeMounts` (ScalarDB Cluster)
+ * `scalardbAnalyticsPostgreSQL.extraVolumes` / `scalardbAnalyticsPostgreSQL.extraVolumeMounts` (ScalarDB Analytics with PostgreSQL)
+ * `ledger.extraVolumes` / `ledger.extraVolumeMounts` (ScalarDL Ledger)
+ * `auditor.extraVolumes` / `auditor.extraVolumeMounts` (ScalarDL Auditor)
+
+* Example (ScalarDB Server)
+
+ ```yaml
+ scalardb:
+ extraVolumes:
+ - name: heap-dump
+ emptyDir: {}
+ extraVolumeMounts:
+ - name: heap-dump
+ mountPath: /dump
+ ```
+
+In this example, you can see the mounted volume in the ScalarDB Server pod as follows.
+
+```console
+$ ls -ld /dump
+drwxrwxrwx 2 root root 4096 Feb 6 07:43 /dump
+```
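+
+With such a volume mounted, one way to take a heap dump is shown below. This is a sketch that assumes the container image ships the JDK `jcmd` tool and that the Java process runs as PID 1 in the container; adjust the pod name for your environment.
+
+```console
+kubectl exec -it <POD_NAME> -- jcmd 1 GC.heap_dump /dump/heap.hprof
+```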
diff --git a/docs/3.12/helm-charts/use-secret-for-credentials.md b/docs/3.12/helm-charts/use-secret-for-credentials.md
new file mode 100644
index 00000000..23ce8911
--- /dev/null
+++ b/docs/3.12/helm-charts/use-secret-for-credentials.md
@@ -0,0 +1,203 @@
+# How to use Secret resources to pass credentials as environment variables into the properties file
+
+You can pass credentials like **username** or **password** as environment variables via a `Secret` resource in Kubernetes. The Docker images for previous versions of Scalar products use the `dockerize` command for templating properties files. The Docker images for the latest versions of Scalar products get values directly from environment variables.
+
+Note: You cannot use the following environment variable names in your custom values file because they are used internally by the Scalar Helm Charts.
+
+```console
+HELM_SCALAR_DB_CONTACT_POINTS
+HELM_SCALAR_DB_CONTACT_PORT
+HELM_SCALAR_DB_USERNAME
+HELM_SCALAR_DB_PASSWORD
+HELM_SCALAR_DB_STORAGE
+HELM_SCALAR_DL_LEDGER_PROOF_ENABLED
+HELM_SCALAR_DL_LEDGER_AUDITOR_ENABLED
+HELM_SCALAR_DL_LEDGER_PROOF_PRIVATE_KEY_PATH
+HELM_SCALAR_DL_AUDITOR_SERVER_PORT
+HELM_SCALAR_DL_AUDITOR_SERVER_PRIVILEGED_PORT
+HELM_SCALAR_DL_AUDITOR_SERVER_ADMIN_PORT
+HELM_SCALAR_DL_AUDITOR_LEDGER_HOST
+HELM_SCALAR_DL_AUDITOR_CERT_HOLDER_ID
+HELM_SCALAR_DL_AUDITOR_CERT_VERSION
+HELM_SCALAR_DL_AUDITOR_CERT_PATH
+HELM_SCALAR_DL_AUDITOR_PRIVATE_KEY_PATH
+SCALAR_DB_LOG_LEVEL
+SCALAR_DL_LEDGER_LOG_LEVEL
+SCALAR_DL_AUDITOR_LOG_LEVEL
+SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAMESPACE_NAME
+SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAME
+```
+
+1. Set the environment variable names in the properties configuration in the custom values file.
+ * Example
+ * ScalarDB Server
+ * ScalarDB Server 3.7 or earlier (Go template syntax)
+
+ {% raw %}
+ ```yaml
+ scalardb:
+ databaseProperties: |
+ ...
+ scalar.db.username={{ default .Env.SCALAR_DB_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DB_PASSWORD "" }}
+ ...
+ ```
+ {% endraw %}
+
+ * ScalarDB Server 3.8 or later (Apache Commons Text syntax)
+
+ ```yaml
+ scalardb:
+ databaseProperties: |
+ ...
+ scalar.db.username=${env:SCALAR_DB_USERNAME}
+ scalar.db.password=${env:SCALAR_DB_PASSWORD}
+ ...
+ ```
+
+ * ScalarDB Cluster
+
+ ```yaml
+ scalardbCluster:
+ scalardbClusterNodeProperties: |
+ ...
+ scalar.db.username=${env:SCALAR_DB_USERNAME}
+ scalar.db.password=${env:SCALAR_DB_PASSWORD}
+ ...
+ ```
+
+ * ScalarDB Analytics with PostgreSQL
+ ```yaml
+ scalardbAnalyticsPostgreSQL:
+ databaseProperties: |
+ ...
+ scalar.db.username=${env:SCALAR_DB_USERNAME}
+ scalar.db.password=${env:SCALAR_DB_PASSWORD}
+ ...
+ ```
+ * ScalarDL Ledger (Go template syntax)
+
+ {% raw %}
+ ```yaml
+ ledger:
+ ledgerProperties: |
+ ...
+ scalar.db.username={{ default .Env.SCALAR_DB_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DB_PASSWORD "" }}
+ ...
+ ```
+ {% endraw %}
+
+ * ScalarDL Auditor (Go template syntax)
+
+ {% raw %}
+ ```yaml
+ auditor:
+ auditorProperties: |
+ ...
+ scalar.db.username={{ default .Env.SCALAR_DB_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DB_PASSWORD "" }}
+ ...
+ ```
+ {% endraw %}
+
+ * ScalarDL Schema Loader (Go template syntax)
+
+ {% raw %}
+ ```yaml
+ schemaLoading:
+ databaseProperties: |
+ ...
+ scalar.db.username={{ default .Env.SCALAR_DB_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DB_PASSWORD "" }}
+ ...
+ ```
+ {% endraw %}
+
+1. Create a `Secret` resource that includes credentials.
+   You need to specify the environment variable names as the keys of the `Secret`.
+ * Example
+
+ ```console
+ kubectl create secret generic scalardb-credentials-secret \
+ --from-literal=SCALAR_DB_USERNAME=postgres \
+ --from-literal=SCALAR_DB_PASSWORD=postgres
+ ```
+
+1. Set the `Secret` name to the following keys in the custom values file.
+ * Keys
+ * `scalardb.secretName` (ScalarDB Server)
+ * `scalardbCluster.secretName` (ScalarDB Cluster)
+ * `scalardbAnalyticsPostgreSQL.secretName` (ScalarDB Analytics with PostgreSQL)
+ * `ledger.secretName` (ScalarDL Ledger)
+ * `auditor.secretName` (ScalarDL Auditor)
+ * `schemaLoading.secretName` (ScalarDL Schema Loader)
+ * Example
+ * ScalarDB Server
+
+ ```yaml
+ scalardb:
+ secretName: "scalardb-credentials-secret"
+ ```
+
+ * ScalarDB Cluster
+
+ ```yaml
+ scalardbCluster:
+ secretName: "scalardb-cluster-credentials-secret"
+ ```
+
+ * ScalarDB Analytics with PostgreSQL
+ ```yaml
+ scalardbAnalyticsPostgreSQL:
+ secretName: scalardb-analytics-postgresql-credentials-secret
+ ```
+ * ScalarDL Ledger
+
+ ```yaml
+ ledger:
+ secretName: "ledger-credentials-secret"
+ ```
+
+ * ScalarDL Auditor
+
+ ```yaml
+ auditor:
+ secretName: "auditor-credentials-secret"
+ ```
+
+ * ScalarDL Schema Loader
+
+ ```yaml
+ schemaLoading:
+ secretName: "schema-loader-ledger-credentials-secret"
+ ```
+
+1. Deploy Scalar products with the above custom values file.
+
+ After deploying Scalar products, the Go template strings (environment variables) are replaced by the values of the `Secret`.
+
+ * Example
+ * Custom values file
+
+ {% raw %}
+ ```yaml
+ scalardb:
+ databaseProperties: |
+ scalar.db.contact_points=jdbc:postgresql://postgresql-scalardb.default.svc.cluster.local:5432/postgres
+ scalar.db.username={{ default .Env.SCALAR_DB_USERNAME "" }}
+ scalar.db.password={{ default .Env.SCALAR_DB_PASSWORD "" }}
+ scalar.db.storage=jdbc
+ ```
+ {% endraw %}
+
+ * Properties file in containers
+
+ ```properties
+ scalar.db.contact_points=jdbc:postgresql://postgresql-scalardb.default.svc.cluster.local:5432/postgres
+ scalar.db.username=postgres
+ scalar.db.password=postgres
+ scalar.db.storage=jdbc
+ ```
+
+ If you use Apache Commons Text syntax, Scalar products get values directly from environment variables.
diff --git a/docs/3.12/images/data_model.png b/docs/3.12/images/data_model.png
new file mode 100644
index 00000000..15a0e4d4
Binary files /dev/null and b/docs/3.12/images/data_model.png differ
diff --git a/docs/3.12/images/software_stack.png b/docs/3.12/images/software_stack.png
new file mode 100644
index 00000000..75fba6e6
Binary files /dev/null and b/docs/3.12/images/software_stack.png differ
diff --git a/docs/3.12/images/two_phase_commit_load_balancing.png b/docs/3.12/images/two_phase_commit_load_balancing.png
new file mode 100644
index 00000000..5cdc26f0
Binary files /dev/null and b/docs/3.12/images/two_phase_commit_load_balancing.png differ
diff --git a/docs/3.12/images/two_phase_commit_sequence_diagram.png b/docs/3.12/images/two_phase_commit_sequence_diagram.png
new file mode 100644
index 00000000..116ef635
Binary files /dev/null and b/docs/3.12/images/two_phase_commit_sequence_diagram.png differ
diff --git a/docs/3.12/index.md b/docs/3.12/index.md
new file mode 100644
index 00000000..a0c5f239
--- /dev/null
+++ b/docs/3.12/index.md
@@ -0,0 +1,83 @@
+# ScalarDB
+
+[![CI](https://github.com/scalar-labs/scalardb/actions/workflows/ci.yaml/badge.svg?branch=master)](https://github.com/scalar-labs/scalardb/actions/workflows/ci.yaml)
+
+ScalarDB is a universal transaction manager that achieves:
+- database/storage-agnostic ACID transactions in a scalable manner even if an underlying database or storage is not ACID-compliant.
+- multi-storage/database/service ACID transactions that can span multiple (possibly different) databases, storages, and services.
+
+## Install
+The library is available on [maven central repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb).
+You can install it in your application using your build tool, such as Gradle or Maven.
+
+To add a dependency on ScalarDB using Gradle, use the following:
+```gradle
+dependencies {
+ implementation 'com.scalar-labs:scalardb:3.12.0'
+}
+```
+
+To add a dependency using Maven:
+```xml
+<dependency>
+  <groupId>com.scalar-labs</groupId>
+  <artifactId>scalardb</artifactId>
+  <version>3.12.0</version>
+</dependency>
+```
+
+## Docs
+* [Getting started](getting-started-with-scalardb.md)
+* [Java API Guide](api-guide.md)
+* [ScalarDB Samples](https://github.com/scalar-labs/scalardb-samples)
+* [ScalarDB Server](scalardb-server.md)
+* [Multi-storage Transactions](multi-storage-transactions.md)
+* [Two-phase Commit Transactions](two-phase-commit-transactions.md)
+* [Design document](design.md)
+* [Schema Loader](schema-loader.md)
+* [Requirements and Recommendations for the Underlying Databases of ScalarDB](requirements.md)
+* [How to Back up and Restore](backup-restore.md)
+* [ScalarDB supported databases](scalardb-supported-databases.md)
+* [Configurations](configurations.md)
+* [Storage abstraction](storage-abstraction.md)
+* Slides
+ * [Making Cassandra more capable, faster, and more reliable](https://speakerdeck.com/scalar/making-cassandra-more-capable-faster-and-more-reliable-at-apachecon-at-home-2020) at ApacheCon@Home 2020
+ * [Scalar DB: A library that makes non-ACID databases ACID-compliant](https://speakerdeck.com/scalar/scalar-db-a-library-that-makes-non-acid-databases-acid-compliant) at Database Lounge Tokyo #6 2020
+ * [Transaction Management on Cassandra](https://speakerdeck.com/scalar/transaction-management-on-cassandra) at Next Generation Cassandra Conference / ApacheCon NA 2019
+* Javadoc
+ * [scalardb](https://javadoc.io/doc/com.scalar-labs/scalardb/latest/index.html) - ScalarDB: A universal transaction manager that achieves database-agnostic transactions and distributed transactions that span multiple databases
+ * [scalardb-rpc](https://javadoc.io/doc/com.scalar-labs/scalardb-rpc/latest/index.html) - ScalarDB RPC libraries
+ * [scalardb-server](https://javadoc.io/doc/com.scalar-labs/scalardb-server/latest/index.html) - ScalarDB Server: A gRPC interface of ScalarDB
+ * [scalardb-schema-loader](https://javadoc.io/doc/com.scalar-labs/scalardb-schema-loader/latest/index.html) - ScalarDB Schema Loader: A tool for schema creation and schema deletion in ScalarDB
+* [Jepsen tests](https://github.com/scalar-labs/scalar-jepsen)
+* [TLA+](https://github.com/scalar-labs/scalardb/tree/master/tla+/consensus-commit)
+
+## Contributing
+This library is mainly maintained by the Scalar Engineering Team, but of course we appreciate any help.
+
+* For asking questions, finding answers, and helping other users, go to [Stack Overflow](https://stackoverflow.com/) and use the [scalardb](https://stackoverflow.com/questions/tagged/scalardb) tag.
+* For filing bugs, suggesting improvements, or requesting new features, help us out by opening an issue.
+
+Here are the contributors we are especially thankful for:
+- [Toshihiro Suzuki](https://github.com/brfrn169) - created [Phoenix adapter](https://github.com/scalar-labs/scalardb-phoenix) for ScalarDB
+- [Yonezawa-T2](https://github.com/Yonezawa-T2) - reported bugs around Serializable and proposed a new Serializable strategy (now named Extra-Read)
+
+## Development
+
+### Pre-commit hook
+
+This project uses [pre-commit](https://pre-commit.com/) to automate code formatting and other checks as much as possible. If you're interested in developing ScalarDB, please [install pre-commit](https://pre-commit.com/#installation) and the git hook script as follows.
+
+```console
+$ ls -a .pre-commit-config.yaml
+.pre-commit-config.yaml
+$ pre-commit install
+```
+
+The code formatter runs automatically when you commit files. If any invalid formatting is detected, the commit fails and the formatter fixes the files. Review the formatted changes, and then commit again.
+
+## License
+ScalarDB is dual-licensed under both the Apache 2.0 License (found in the LICENSE file in the root directory) and a commercial license.
+You may select, at your option, one of the above-listed licenses.
+The commercial license includes several enterprise-grade features such as management tools and declarative query interfaces like GraphQL and SQL.
+For more information about the commercial license, please [contact us](https://scalar-labs.com/contact_us/).
diff --git a/docs/3.12/multi-storage-transactions.md b/docs/3.12/multi-storage-transactions.md
new file mode 100644
index 00000000..018c808b
--- /dev/null
+++ b/docs/3.12/multi-storage-transactions.md
@@ -0,0 +1,60 @@
+# Multi-Storage Transactions
+
+ScalarDB transactions can span multiple storages or databases while maintaining ACID compliance by using a feature called *multi-storage transactions*.
+
+This page explains how multi-storage transactions work and how to configure the feature in ScalarDB.
+
+## How multi-storage transactions work in ScalarDB
+
+In ScalarDB, the `multi-storage` implementation holds multiple storage instances and has mappings from a namespace name to a proper storage instance. When an operation is executed, the multi-storage transactions feature chooses a proper storage instance from the specified namespace by using the namespace-storage mapping and uses that storage instance.
+
+## How to configure ScalarDB to support multi-storage transactions
+
+To enable multi-storage transactions, you need to specify `consensus-commit` as the value for `scalar.db.transaction_manager`, `multi-storage` as the value for `scalar.db.storage`, and configure your databases in the ScalarDB properties file.
+
+The following is an example of configurations for multi-storage transactions:
+
+```properties
+# Consensus Commit is required to support multi-storage transactions.
+scalar.db.transaction_manager=consensus-commit
+
+# Multi-storage implementation is used for Consensus Commit.
+scalar.db.storage=multi-storage
+
+# Define storage names by using a comma-separated format.
+# In this case, "cassandra" and "mysql" are used.
+scalar.db.multi_storage.storages=cassandra,mysql
+
+# Define the "cassandra" storage.
+# When setting storage properties, such as `storage`, `contact_points`, `username`, and `password`, for multi-storage transactions, the format is `scalar.db.multi_storage.storages.<storage-name>.<property-name>`.
+# For example, to configure the `scalar.db.contact_points` property for Cassandra, specify `scalar.db.multi_storage.storages.cassandra.contact_points`.
+scalar.db.multi_storage.storages.cassandra.storage=cassandra
+scalar.db.multi_storage.storages.cassandra.contact_points=localhost
+scalar.db.multi_storage.storages.cassandra.username=cassandra
+scalar.db.multi_storage.storages.cassandra.password=cassandra
+
+# Define the "mysql" storage.
+# When defining JDBC-specific configurations for multi-storage transactions, you can follow a similar format of `scalar.db.multi_storage.storages.<storage-name>.<property-name>`.
+# For example, to configure the `scalar.db.jdbc.connection_pool.min_idle` property for MySQL, specify `scalar.db.multi_storage.storages.mysql.jdbc.connection_pool.min_idle`.
+scalar.db.multi_storage.storages.mysql.storage=jdbc
+scalar.db.multi_storage.storages.mysql.contact_points=jdbc:mysql://localhost:3306/
+scalar.db.multi_storage.storages.mysql.username=root
+scalar.db.multi_storage.storages.mysql.password=mysql
+# Define the JDBC-specific configurations for the "mysql" storage.
+scalar.db.multi_storage.storages.mysql.jdbc.connection_pool.min_idle=5
+scalar.db.multi_storage.storages.mysql.jdbc.connection_pool.max_idle=10
+scalar.db.multi_storage.storages.mysql.jdbc.connection_pool.max_total=25
+
+# Define namespace mapping from a namespace name to a storage.
+# The format is "<namespace-name>:<storage-name>,...".
+scalar.db.multi_storage.namespace_mapping=user:cassandra,coordinator:mysql
+
+# Define the default storage that's used if a specified table doesn't have any mapping.
+scalar.db.multi_storage.default_storage=cassandra
+```
+
+For additional configurations, see [ScalarDB Configurations](configurations.md).
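+
+With this configuration in place, application code does not reference storages directly; each operation is routed by its namespace. The following is a minimal sketch (not taken from the official samples) that assumes a hypothetical table `customers` in the `user` namespace, so the write goes to Cassandra through the `user:cassandra` mapping:
+
+```java
+import com.scalar.db.api.DistributedTransaction;
+import com.scalar.db.api.DistributedTransactionManager;
+import com.scalar.db.api.Put;
+import com.scalar.db.io.Key;
+import com.scalar.db.service.TransactionFactory;
+
+public class MultiStorageExample {
+  public static void main(String[] args) throws Exception {
+    // Load the multi-storage configuration shown above.
+    TransactionFactory factory = TransactionFactory.create("scalardb.properties");
+    DistributedTransactionManager manager = factory.getTransactionManager();
+
+    DistributedTransaction tx = manager.start();
+    try {
+      // Routed to the "cassandra" storage through the "user" namespace mapping.
+      tx.put(
+          Put.newBuilder()
+              .namespace("user")
+              .table("customers")
+              .partitionKey(Key.ofInt("customer_id", 1))
+              .intValue("points", 100)
+              .build());
+      tx.commit();
+    } catch (Exception e) {
+      tx.abort();
+      throw e;
+    }
+  }
+}
+```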
+
+## Hands-on tutorial
+
+For a hands-on tutorial, see [Create a Sample Application That Supports Multi-Storage Transactions](https://github.com/scalar-labs/scalardb-samples/tree/main/multi-storage-transaction-sample).
diff --git a/docs/3.12/requirements.md b/docs/3.12/requirements.md
new file mode 100644
index 00000000..98d86715
--- /dev/null
+++ b/docs/3.12/requirements.md
@@ -0,0 +1,51 @@
+# Requirements and Recommendations for the Underlying Databases of ScalarDB
+
+This document explains the requirements and recommendations for the underlying databases of ScalarDB so that ScalarDB applications work correctly.
+
+## Common requirements
+
+This section describes common requirements for the underlying databases when using ScalarDB.
+
+### Privileges to access the underlying databases
+
+ScalarDB operates on the underlying databases not only for CRUD operations but also for administrative operations like creating or altering schemas, tables, or indexes. Thus, ScalarDB requires a fully privileged account for accessing the underlying databases.
+
+## Cassandra or Cassandra-compatible database requirements
+
+The following requirements must be met for ScalarDB on Cassandra or Cassandra-compatible databases to work properly, for storage operations with `LINEARIZABLE` to provide linearizability, and for transaction operations with `SERIALIZABLE` to provide strict serializability.
+
+### Ensure durability in Cassandra
+
+In **cassandra.yaml**, you must change `commitlog_sync` from the default `periodic` to `batch` or `group` to ensure durability in Cassandra.
+
+ScalarDB provides only the atomicity and isolation properties of ACID and requests the underlying databases to provide durability. Although you can specify `periodic`, we do not recommend doing so unless you know exactly what you are doing.
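+
+For example, a minimal **cassandra.yaml** excerpt might look like the following; the batch window value is only illustrative:
+
+```yaml
+# Sync the commit log before acknowledging writes, instead of the default periodic mode.
+commitlog_sync: batch
+commitlog_sync_batch_window_in_ms: 2
+```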
+
+### Confirm that the Cassandra-compatible database supports lightweight transactions (LWTs)
+
+You must use a Cassandra-compatible database that supports LWTs.
+
+ScalarDB does not work on some Cassandra-compatible databases that do not support LWTs, such as [Amazon Keyspaces](https://aws.amazon.com/keyspaces/). This is because the Consensus Commit transaction manager relies on the linearizable operations of underlying databases to make transactions serializable.
+
+## CosmosDB database requirements
+
+In your Azure CosmosDB account, you must set the **default consistency level** to **Strong**.
+
+Consensus Commit, the ScalarDB transaction protocol, requires linearizable reads. By setting the **default consistency level** to **Strong**, CosmosDB can guarantee linearizability.
+
+For instructions on how to configure this setting, see the official documentation at [Configure the default consistency level](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/how-to-manage-consistency#configure-the-default-consistency-level).
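+
+If you manage the account with the Azure CLI, a sketch like the following sets that level; the account and resource group names here are placeholders:
+
+```console
+$ az cosmosdb update \
+    --name MyCosmosDBAccount \
+    --resource-group MyResourceGroup \
+    --default-consistency-level Strong
+```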
+
+## JDBC database recommendations
+
+In ScalarDB on JDBC databases, you can't choose a consistency level (`LINEARIZABLE`, `SEQUENTIAL`, or `EVENTUAL`) in your code by using the `Operation.withConsistency()` method. Instead, the consistency level depends on the setup of your JDBC database.
+
+For example, if you have asynchronous read replicas in your setup and perform read operations against them, the consistency will be eventual because you can read stale data from the read replicas. On the other hand, if you perform all operations against a single master instance, the consistency will be linearizable.
+
+With this in mind, you must perform all operations or transactions against a single master instance so that you can achieve linearizability and avoid worrying about consistency issues in your application. In other words, ScalarDB does not support read replicas.
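+
+For example, the following is a minimal sketch of a JDBC configuration that points at a single primary instance; the host name is a placeholder:
+
+```properties
+scalar.db.storage=jdbc
+# Use the single primary (writer) endpoint, not a read-replica endpoint.
+scalar.db.contact_points=jdbc:mysql://primary.example.com:3306/
+scalar.db.username=root
+scalar.db.password=mysql
+```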
+
+{% capture notice--info %}
+**Note**
+
+You can still use a read replica as a backup and standby even when following this guideline.
+{% endcapture %}
+
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
diff --git a/docs/3.12/scalar-kubernetes/AccessScalarProducts.md b/docs/3.12/scalar-kubernetes/AccessScalarProducts.md
new file mode 100644
index 00000000..b5f3d13e
--- /dev/null
+++ b/docs/3.12/scalar-kubernetes/AccessScalarProducts.md
@@ -0,0 +1,203 @@
+# Make ScalarDB or ScalarDL deployed in a Kubernetes cluster environment available from applications
+
+This document explains how to make ScalarDB or ScalarDL deployed in a Kubernetes cluster environment available from applications. To make ScalarDB or ScalarDL available from applications, you can use Scalar Envoy via a Kubernetes service resource named `<helm release name>-envoy`. You can use `<helm release name>-envoy` in several ways, such as:
+
+* Directly from inside the same Kubernetes cluster as ScalarDB or ScalarDL.
+* Via a load balancer from outside the Kubernetes cluster.
+* From a bastion server by using the `kubectl port-forward` command (for testing purposes only).
+
+The resource name `<helm release name>-envoy` is determined by the helm release name. You can see the helm release name by running the `helm list` command.
+
+```console
+$ helm list -n ns-scalar
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+scalardb ns-scalar 1 2023-02-09 19:31:40.527130674 +0900 JST deployed scalardb-2.5.0 3.8.0
+scalardl-auditor ns-scalar 1 2023-02-09 19:32:03.008986045 +0900 JST deployed scalardl-audit-2.5.1 3.7.1
+scalardl-ledger ns-scalar 1 2023-02-09 19:31:53.459548418 +0900 JST deployed scalardl-4.5.1 3.7.1
+```
+
+You can also see the envoy service name `<helm release name>-envoy` by running the `kubectl get service` command.
+
+```console
+$ kubectl get service -n ns-scalar
+NAME                             TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)                           AGE
+scalardb-envoy                   LoadBalancer   10.99.245.143    <pending>     60051:31110/TCP                   2m2s
+scalardb-envoy-metrics           ClusterIP      10.104.56.87     <none>        9001/TCP                          2m2s
+scalardb-headless                ClusterIP      None             <none>        60051/TCP                         2m2s
+scalardb-metrics                 ClusterIP      10.111.213.194   <none>        8080/TCP                          2m2s
+scalardl-auditor-envoy           LoadBalancer   10.111.141.43    <pending>     40051:31553/TCP,40052:31171/TCP   99s
+scalardl-auditor-envoy-metrics   ClusterIP      10.104.245.188   <none>        9001/TCP                          99s
+scalardl-auditor-headless        ClusterIP      None             <none>        40051/TCP,40053/TCP,40052/TCP     99s
+scalardl-auditor-metrics         ClusterIP      10.105.119.158   <none>        8080/TCP                          99s
+scalardl-ledger-envoy            LoadBalancer   10.96.239.167    <pending>     50051:32714/TCP,50052:30857/TCP   109s
+scalardl-ledger-envoy-metrics    ClusterIP      10.97.204.18     <none>        9001/TCP                          109s
+scalardl-ledger-headless         ClusterIP      None             <none>        50051/TCP,50053/TCP,50052/TCP     109s
+scalardl-ledger-metrics          ClusterIP      10.104.216.189   <none>        8080/TCP                          109s
+```
+
+## Run application (client) requests to ScalarDB or ScalarDL via service resources directly from inside the same Kubernetes cluster
+
+If you deploy your application (client) in the same Kubernetes cluster as ScalarDB or ScalarDL (for example, if you deploy your application [client] on another node group or pool in the same Kubernetes cluster), the application can access ScalarDB or ScalarDL by using Kubernetes service resources. The format of the service resource name (FQDN) is `<helm release name>-envoy.<namespace>.svc.cluster.local`.
+
+The following are examples of ScalarDB and ScalarDL deployments on the `ns-scalar` namespace:
+
+* **ScalarDB Server**
+
+ ```console
+ scalardb-envoy.ns-scalar.svc.cluster.local
+ ```
+
+* **ScalarDL Ledger**
+
+ ```console
+ scalardl-ledger-envoy.ns-scalar.svc.cluster.local
+ ```
+
+* **ScalarDL Auditor**
+
+ ```console
+ scalardl-auditor-envoy.ns-scalar.svc.cluster.local
+ ```
+
+When using the Kubernetes service resource, you must set the above FQDN in the properties file for the application (client) as follows:
+
+* **Client properties file for ScalarDB Server**
+
+ ```properties
+ scalar.db.contact_points=<helm release name>-envoy.<namespace>.svc.cluster.local
+ scalar.db.contact_port=60051
+ scalar.db.storage=grpc
+ scalar.db.transaction_manager=grpc
+ ```
+
+* **Client properties file for ScalarDL Ledger**
+
+ ```properties
+ scalar.dl.client.server.host=<helm release name>-envoy.<namespace>.svc.cluster.local
+ scalar.dl.ledger.server.port=50051
+ scalar.dl.ledger.server.privileged_port=50052
+ ```
+
+* **Client properties file for ScalarDL Ledger with ScalarDL Auditor mode enabled**
+
+ ```properties
+ # Ledger
+ scalar.dl.client.server.host=<helm release name>-envoy.<namespace>.svc.cluster.local
+ scalar.dl.ledger.server.port=50051
+ scalar.dl.ledger.server.privileged_port=50052
+
+ # Auditor
+ scalar.dl.client.auditor.enabled=true
+ scalar.dl.client.auditor.host=