From b65a0b6e38c4611504952879b774367fd5f659d5 Mon Sep 17 00:00:00 2001 From: josh-wong Date: Tue, 14 Nov 2023 11:11:40 +0900 Subject: [PATCH 1/2] Add updated doc --- .../scalardb-fdw.md | 174 +++++ .../schema-importer.md | 60 ++ docs/3.10/schema-loader.md | 530 ++++++++----- docs/3.5/schema-loader.md | 541 +++++++++++++ docs/3.6/schema-loader.md | 615 +++++++++++++++ docs/3.7/schema-loader.md | 716 ++++++++++++++++++ docs/3.8/schema-loader.md | 548 +++++++++----- .../scalardb-fdw.md | 174 +++++ .../schema-importer.md | 60 ++ docs/3.9/schema-loader.md | 530 ++++++++----- .../scalardb-fdw.md | 174 +++++ .../schema-importer.md | 60 ++ docs/latest/schema-loader.md | 530 ++++++++----- 13 files changed, 3979 insertions(+), 733 deletions(-) create mode 100644 docs/3.10/scalardb-analytics-postgresql/scalardb-fdw.md create mode 100644 docs/3.10/scalardb-analytics-postgresql/schema-importer.md create mode 100644 docs/3.5/schema-loader.md create mode 100644 docs/3.6/schema-loader.md create mode 100644 docs/3.7/schema-loader.md create mode 100644 docs/3.9/scalardb-analytics-postgresql/scalardb-fdw.md create mode 100644 docs/3.9/scalardb-analytics-postgresql/schema-importer.md create mode 100644 docs/latest/scalardb-analytics-postgresql/scalardb-fdw.md create mode 100644 docs/latest/scalardb-analytics-postgresql/schema-importer.md diff --git a/docs/3.10/scalardb-analytics-postgresql/scalardb-fdw.md b/docs/3.10/scalardb-analytics-postgresql/scalardb-fdw.md new file mode 100644 index 00000000..78fe9787 --- /dev/null +++ b/docs/3.10/scalardb-analytics-postgresql/scalardb-fdw.md @@ -0,0 +1,174 @@ +# ScalarDB FDW + +ScalarDB FDW is a PostgreSQL extension that implements a foreign data wrapper (FDW) for [ScalarDB](https://www.scalar-labs.com/scalardb/). + +ScalarDB FDW uses the Java Native Interface to directly utilize ScalarDB as a library inside the FDW and read data from external databases via scan operations for ScalarDB. + +## Prerequisites + +You must have the following prerequisites set up in your environment. + +### JDK + +You must install a version of the Java Development Kit (JDK) that is compatible with ScalarDB. In addition, you must set the `JAVA_HOME` environment variable, which points to your JDK installation directory. + +Note that since these extensions use the Java Native Interface (JNI) internally, you must include the dynamic library of the Java virtual machine (JVM), such as `libjvm.so`, in the library search path. + +### PostgreSQL + +This extension supports PostgreSQL 13 or later. For details on how to install PostgreSQL, see the official documentation at [Server Administration](https://www.postgresql.org/docs/current/admin.html). + +## Build and installation + +You can build and install this extension by running the following command. + +```console +make install +``` + +### Common build errors + +This section describes some common build errors that you might encounter. + +#### ld: library not found for -ljvm + +Normally, the build script finds the path for `libjvm.so` and properly sets it as a library search path. However, if you encounter the error `ld: library not found for -ljvm`, please copy the `libjvm.so` file to the default library search path. For example: + +```console +ln -s //libjvm.so /usr/lib64/libjvm.so +``` + +## Usage + +This section provides a usage example and available options for FDW for ScalarDB. + +### Example + +The following example shows you how to install and create the necessary components, and then run a query by using the FDW extension. + +#### 1. 
Install the extension

For details on how to install the extension, see the [Build and installation](#build-and-installation) section.

#### 2. Create the extension

To create the extension, run the following command:

```sql
CREATE EXTENSION scalardb_fdw;
```

#### 3. Create a foreign server

To create a foreign server, run the following command:

```sql
CREATE SERVER scalardb FOREIGN DATA WRAPPER scalardb_fdw OPTIONS (
    config_file_path '/path/to/scalardb.properties'
);
```

#### 4. Create a user mapping

To create a user mapping, run the following command:

```sql
CREATE USER MAPPING FOR PUBLIC SERVER scalardb;
```

#### 5. Create a foreign table

To create a foreign table, run the following command:

```sql
CREATE FOREIGN TABLE sample_table (
    pk int,
    ck1 int,
    ck2 int,
    boolean_col boolean,
    bigint_col bigint,
    float_col double precision,
    double_col double precision,
    text_col text,
    blob_col bytea
) SERVER scalardb OPTIONS (
    namespace 'ns',
    table_name 'sample_table'
);
```

#### 6. Run a query

To run a query, run the following command:

```sql
SELECT * FROM sample_table;
```

### Available options

You can set the following options for ScalarDB FDW objects.

#### `CREATE SERVER`

You can set the following options on a ScalarDB foreign server object:

| Name               | Required | Type     | Description                                                         |
| ------------------ | -------- | -------- | ------------------------------------------------------------------- |
| `config_file_path` | **Yes**  | `string` | The path to the ScalarDB config file.                               |
| `max_heap_size`    | No       | `string` | The maximum heap size of the JVM. The format is the same as `-Xmx`. |

#### `CREATE USER MAPPING`

Currently, no options exist for `CREATE USER MAPPING`.

#### `CREATE FOREIGN TABLE`

The following options can be set on a ScalarDB foreign table object:

| Name         | Required | Type     | Description                                                       |
| ------------ | -------- | -------- | ----------------------------------------------------------------- |
| `namespace`  | **Yes**  | `string` | The name of the namespace of the table in the ScalarDB instance.  |
| `table_name` | **Yes**  | `string` | The name of the table in the ScalarDB instance.                   |

### Data-type mapping

| ScalarDB | PostgreSQL       |
| -------- | ---------------- |
| BOOLEAN  | boolean          |
| INT      | int              |
| BIGINT   | bigint           |
| FLOAT    | float            |
| DOUBLE   | double precision |
| TEXT     | text             |
| BLOB     | bytea            |

## Testing

This section describes how to test FDW for ScalarDB.

### Set up a ScalarDB instance for testing

Before testing FDW for ScalarDB, you must have a running ScalarDB instance that contains test data. You can set up the instance and load the test data by running the following command:

```console
./test/setup.sh
```

If you want to reset the instance, run the following cleanup command and then run the setup command above again:

```console
./test/cleanup.sh
```

### Run regression tests

You can run regression tests by running the following command **after** you have installed the FDW extension:

```console
make installcheck
```

## Limitations

- This extension aims to enable analytical query processing on ScalarDB-managed databases. Therefore, this extension only supports reading data from ScalarDB.
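As a hypothetical illustration of that analytical use case, the following read-only query aggregates over the `sample_table` foreign table created in the example above. The query is a sketch and assumes that test data has already been written through ScalarDB:

```sql
SELECT pk, COUNT(*) AS row_count, AVG(double_col) AS avg_double
FROM sample_table
GROUP BY pk
ORDER BY pk;
```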
diff --git a/docs/3.10/scalardb-analytics-postgresql/schema-importer.md b/docs/3.10/scalardb-analytics-postgresql/schema-importer.md
new file mode 100644
index 00000000..d25ae5c3
--- /dev/null
+++ b/docs/3.10/scalardb-analytics-postgresql/schema-importer.md
@@ -0,0 +1,60 @@
# Schema Importer

Schema Importer is a CLI tool for automatically configuring PostgreSQL. By using this tool, your PostgreSQL database can have identical database objects, such as namespaces and tables, as your ScalarDB instance.

Schema Importer reads the ScalarDB configuration file, retrieves the schemas of the tables defined in ScalarDB, and creates the corresponding foreign data wrapper external tables and views in that order. For more information, refer to [Getting Started with ScalarDB Analytics with PostgreSQL](getting-started.md).

## Build Schema Importer

You can build Schema Importer by using [Gradle](https://gradle.org/). To build Schema Importer, run the following command:

```console
./gradlew build
```

You may want to build a fat JAR file so that you can launch Schema Importer by using `java -jar`. To build the fat JAR, run the following command:

```console
./gradlew shadowJar
```

After you build the fat JAR, you can find the fat JAR file in the `app/build/libs/` directory.

## Run Schema Importer

To run Schema Importer by using the fat JAR file, run the following command, replacing `<PATH_TO_FAT_JAR>` with the path to the fat JAR file:

```console
java -jar <PATH_TO_FAT_JAR>
```

Available options are as follows:

| Name                        | Required | Description                                                                                                                                    | Default                                    |
| --------------------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
| `--config`                  | **Yes**  | Path to the ScalarDB configuration file                                                                                                        |                                            |
| `--config-on-postgres-host` | No       | Path to the ScalarDB configuration file on the PostgreSQL-running host                                                                         | The same value as `--config` will be used. |
| `--namespace`, `-n`         | **Yes**  | Namespaces to import into the analytics instance. You can specify the `--namespace` option multiple times if you have two or more namespaces. |                                            |
| `--host`                    | No       | PostgreSQL host                                                                                                                                | localhost                                  |
| `--port`                    | No       | PostgreSQL port                                                                                                                                | 5432                                       |
| `--database`                | No       | PostgreSQL database                                                                                                                            | postgres                                   |
| `--user`                    | No       | PostgreSQL user                                                                                                                                | postgres                                   |
| `--password`                | No       | PostgreSQL password                                                                                                                            |                                            |
| `--debug`                   | No       | Enable debug mode                                                                                                                              |                                            |

## Test Schema Importer

To test Schema Importer, run the following command:

```console
./gradlew test
```

## Build a Docker image of Schema Importer

To build a Docker image of Schema Importer, run the following command, replacing `<TAG>` with the tag version of Schema Importer that you want to use:

```console
docker build -t ghcr.io/scalar-labs/scalardb-analytics-postgresql-schema-importer:<TAG> -f ./app/Dockerfile .
```
diff --git a/docs/3.10/schema-loader.md b/docs/3.10/schema-loader.md
index c20dcc4d..7c981fc9 100644
--- a/docs/3.10/schema-loader.md
+++ b/docs/3.10/schema-loader.md
@@ -1,50 +1,66 @@
# ScalarDB Schema Loader

-ScalarDB has its own data model and schema, that maps to the implementation specific data model and schema.
-Also, it stores internal metadata (e.g., transaction ID, record version, transaction status) for managing transaction logs and statuses when you use the Consensus Commit transaction manager.
-It is a little hard for application developers to manage the schema mapping and metadata for transactions, so we offer a tool called ScalarDB Schema Loader for creating schema without requiring much knowledge about those.
+ScalarDB has its own data model and schema that maps to the implementation-specific data model and schema. In addition, ScalarDB stores internal metadata, such as transaction IDs, record versions, and transaction statuses, to manage transaction logs and statuses when you use the Consensus Commit transaction manager.

-There are two ways to specify general CLI options in Schema Loader:
-  - Pass a ScalarDB configuration file and database/storage-specific options additionally.
-  - Pass the options without a ScalarDB configuration (Deprecated).
+Since managing the schema mapping and metadata for transactions can be difficult, you can use ScalarDB Schema Loader, which is a tool to create schemas that doesn't require in-depth knowledge about schema mapping or metadata.

-Note that this tool supports only basic options to create/delete/repair/alter a table. If you want
-to use the advanced features of a database, please alter your tables with a database specific tool after creating them with this tool.
+You have two options to specify general CLI options in Schema Loader:

-# Usage
+- Pass the ScalarDB properties file and database-specific or storage-specific options.
+- Pass database-specific or storage-specific options without the ScalarDB properties file. (Deprecated)

-## Install
+{% capture notice--info %}
+**Note**

-The release versions of `schema-loader` can be downloaded from [releases](https://github.com/scalar-labs/scalardb/releases) page of ScalarDB.
+This tool supports only basic options to create, delete, repair, or alter a table. If you want to use the advanced features of a database, you must alter your tables with a database-specific tool after creating the tables with this tool.
+{% endcapture %}

-## Build
+<div class="notice--info">
{{ notice--info | markdownify }}
-In case you want to build `schema-loader` from the source: -```console -$ ./gradlew schema-loader:shadowJar -``` -- The built fat jar file is `schema-loader/build/libs/scalardb-schema-loader-.jar` +## Set up Schema Loader -## Docker +Select your preferred method to set up Schema Loader, and follow the instructions. -You can pull the docker image from [Scalar's container registry](https://github.com/orgs/scalar-labs/packages/container/package/scalardb-schema-loader). -```console -docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader: -``` -- Note that you can specify the same command arguments even if you use the fat jar or the container. The example commands in the next section are shown with a jar, but you can run the commands with the container in the same way by replacing `java -jar scalardb-schema-loader-.jar` with `docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader:`. +
+
+ + +
+ +
+ +You can download the release versions of Schema Loader from the [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases) page. +
+
+ +You can pull the Docker image from the [Scalar container registry](https://github.com/orgs/scalar-labs/packages/container/package/scalardb-schema-loader) by running the following command, replacing the contents in the angle brackets as described: -You can also build the docker image as follows. ```console -$ ./gradlew schema-loader:docker +$ docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader: ``` -## Run +{% capture notice--info %} +**Note** + +You can specify the same command arguments even if you use the fat JAR or the container. In the [Available commands](#available-commands) section, the JAR is used, but you can run the commands by using the container in the same way by replacing `java -jar scalardb-schema-loader-.jar` with `docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader:`. +{% endcapture %} + +
{{ notice--info | markdownify }}
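For example, the following sketch creates the tables defined in a local schema file by using the container. The file paths are hypothetical, and the sketch assumes that the image's entrypoint accepts the same arguments as the JAR, as the note above describes:

```console
$ docker run --rm -v /path/to/database.properties:/database.properties -v /path/to/schema.json:/schema.json ghcr.io/scalar-labs/scalardb-schema-loader:<VERSION> --config /database.properties -f /schema.json --coordinator
```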
+
+
+ +## Run Schema Loader + +This section explains how to run Schema Loader. ### Available commands -For using a config file: +Select how you would like to configure Schema Loader for your database. The preferred method is to use the properties file since other, database-specific methods are deprecated. + +The following commands are available when using the properties file: + ```console -Usage: java -jar scalardb-schema-loader-.jar [-D] [--coordinator] +Usage: java -jar scalardb-schema-loader-.jar [-D] [--coordinator] [--no-backup] [--no-scaling] -c= [--compaction-strategy=] [-f=] [--replication-factor=] @@ -60,7 +76,7 @@ Create/Delete schemas in the storage defined in the config file --compaction-strategy= The compaction strategy, must be LCS, STCS or TWCS (supported in Cassandra) - --coordinator Create/delete/repair coordinator tables + --coordinator Create/delete/repair Coordinator tables -D, --delete-all Delete tables -f, --schema-file= Path to the schema json file @@ -78,9 +94,57 @@ Create/Delete schemas in the storage defined in the config file --ru= Base resource unit (supported in DynamoDB, Cosmos DB) ``` -For Cosmos DB for NoSQL (Deprecated. Please use the command using a config file instead): +For a sample properties file, see [`database.properties`](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties). + +{% capture notice--info %} +**Note** + +The following database-specific methods have been deprecated. Please use the [commands for configuring the properties file](#available-commands) instead. + +
+
+ + + + +
+ +
+ +```console +Usage: java -jar scalardb-schema-loader-.jar --cassandra [-D] + [-c=] -f= -h= + [-n=] [-p=] [-P=] + [-R=] [-u=] +Create/Delete Cassandra schemas + -A, --alter Alter tables : it will add new columns and create/delete + secondary index for existing tables. It compares the + provided table schema to the existing schema to decide + which columns need to be added and which indexes need + to be created or deleted + -c, --compaction-strategy= + Cassandra compaction strategy, must be LCS, STCS or TWCS + -D, --delete-all Delete tables + -f, --schema-file= + Path to the schema json file + -h, --host= Cassandra host IP + -n, --network-strategy= + Cassandra network strategy, must be SimpleStrategy or + NetworkTopologyStrategy + -p, --password= + Cassandra password + -P, --port= Cassandra Port + -R, --replication-factor= + Cassandra replication factor + --repair-all Repair tables : it repairs the table metadata of + existing tables + -u, --user= Cassandra user +``` +
+
+ ```console -Usage: java -jar scalardb-schema-loader-.jar --cosmos [-D] +Usage: java -jar scalardb-schema-loader-.jar --cosmos [-D] [--no-scaling] -f= -h= -p= [-r=] Create/Delete Cosmos DB schemas -A, --alter Alter tables : it will add new columns and create/delete @@ -99,10 +163,11 @@ Create/Delete Cosmos DB schemas existing tables and repairs stored procedure attached to each table ``` +
+
-For DynamoDB (Deprecated. Please use the command using a config file instead): ```console -Usage: java -jar scalardb-schema-loader-.jar --dynamo [-D] +Usage: java -jar scalardb-schema-loader-.jar --dynamo [-D] [--no-backup] [--no-scaling] [--endpoint-override=] -f= -p= [-r=] --region= -u= @@ -127,41 +192,11 @@ Create/Delete DynamoDB schemas existing tables -u, --user= AWS access key ID ``` +
+
-For Cassandra (Deprecated. Please use the command using a config file instead): ```console -Usage: java -jar scalardb-schema-loader-.jar --cassandra [-D] - [-c=] -f= -h= - [-n=] [-p=] [-P=] - [-R=] [-u=] -Create/Delete Cassandra schemas - -A, --alter Alter tables : it will add new columns and create/delete - secondary index for existing tables. It compares the - provided table schema to the existing schema to decide - which columns need to be added and which indexes need - to be created or deleted - -c, --compaction-strategy= - Cassandra compaction strategy, must be LCS, STCS or TWCS - -D, --delete-all Delete tables - -f, --schema-file= - Path to the schema json file - -h, --host= Cassandra host IP - -n, --network-strategy= - Cassandra network strategy, must be SimpleStrategy or - NetworkTopologyStrategy - -p, --password= - Cassandra password - -P, --port= Cassandra Port - -R, --replication-factor= - Cassandra replication factor - --repair-all Repair tables : it repairs the table metadata of - existing tables - -u, --user= Cassandra user -``` - -For a JDBC database (Deprecated. Please use the command using a config file instead): -```console -Usage: java -jar scalardb-schema-loader-.jar --jdbc [-D] +Usage: java -jar scalardb-schema-loader-.jar --jdbc [-D] -f= -j= -p= -u= Create/Delete JDBC schemas -A, --alter Alter tables : it will add new columns and create/delete @@ -179,142 +214,234 @@ Create/Delete JDBC schemas existing tables -u, --user= JDBC user ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
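For reference, a minimal ScalarDB properties file for a local PostgreSQL database might look like the following sketch. The values here are hypothetical placeholders, so adjust them for your environment:

```properties
scalar.db.storage=jdbc
scalar.db.contact_points=jdbc:postgresql://localhost:5432/
scalar.db.username=postgres
scalar.db.password=postgres
```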
### Create namespaces and tables -For using a config file (Sample config file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties)): +To create namespaces and tables by using a properties file, run the following command, replacing the contents in the angle brackets as described: + ```console -$ java -jar scalardb-schema-loader-.jar --config -f schema.json [--coordinator] +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] ``` - - if `--coordinator` is specified, the coordinator tables will be created. -For using CLI arguments fully for configuration (Deprecated. Please use the command using a config file instead): +If `--coordinator` is specified, a [Coordinator table](api-guide.md#specify-operations-for-the-coordinator-table) will be created. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ ```console -# For Cosmos DB for NoSQL -$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f schema.json [-r BASE_RESOURCE_UNIT] +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f [-n ] [-R ] ``` - - `` you can use a primary key or a secondary key. - - `-r BASE_RESOURCE_UNIT` is an option. You can specify the RU of each database. The maximum RU in tables in the database will be set. If you don't specify RU of tables, the database RU will be set with this option. By default, it's 400. + +- If `-P ` is not supplied, it defaults to `9042`. +- If `-u ` is not supplied, it defaults to `cassandra`. +- If `-p ` is not supplied, it defaults to `cassandra`. +- `` should be `SimpleStrategy` or `NetworkTopologyStrategy` +
+
```console -# For DynamoDB -$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f schema.json [-r BASE_RESOURCE_UNIT] +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f [-r BASE_RESOURCE_UNIT] ``` - - `` should be a string to specify an AWS region like `ap-northeast-1`. - - `-r` option is almost the same as Cosmos DB for NoSQL option. However, the unit means DynamoDB capacity unit. The read and write capacity units are set the same value. + +- `` you can use a primary key or a secondary key. +- `-r BASE_RESOURCE_UNIT` is an option. You can specify the RU of each database. The maximum RU in tables in the database will be set. If you don't specify RU of tables, the database RU will be set with this option. By default, it's 400. +
+
```console -# For Cassandra -$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f schema.json [-n ] [-R ] +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f [-r BASE_RESOURCE_UNIT] ``` - - If `-P ` is not supplied, it defaults to `9042`. - - If `-u ` is not supplied, it defaults to `cassandra`. - - If `-p ` is not supplied, it defaults to `cassandra`. - - `` should be `SimpleStrategy` or `NetworkTopologyStrategy` + +- `` should be a string to specify an AWS region like `ap-northeast-1`. +- `-r` option is almost the same as Cosmos DB for NoSQL option. However, the unit means DynamoDB capacity unit. The read and write capacity units are set the same value. +
+
```console -# For a JDBC database -$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f schema.json +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
</div>

### Alter tables

You can use a command to add new columns to and create or delete a secondary index for existing tables. This command compares the provided table schema to the existing schema to decide which columns need to be added and which indexes need to be created or deleted.

To add new columns to and create or delete a secondary index for existing tables, run the following command, replacing the contents in the angle brackets as described:

```console
$ java -jar scalardb-schema-loader-<VERSION>.jar --config <PATH_TO_SCALARDB_PROPERTIES_FILE> -f <PATH_TO_SCHEMA_FILE> --alter
```

{% capture notice--info %}
**Note**

The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead.
+
+ + + + +
+ +
```console -# For Cosmos DB for NoSQL -$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f schema.json --alter +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f --alter ``` +
+
```console -# For DynamoDB -$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f schema.json --alter +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f --alter ``` +
+
```console -# For Cassandra -$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f schema.json --alter +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f --alter ``` +
+
```console -# For a JDBC database -$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f schema.json --alter +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f --alter ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
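As a hypothetical illustration of an altered schema file, the following sketch extends the `sample_db.sample_table1` definition from the [sample schema file](#sample-schema-file) with a new `c6` column and a secondary index on `c4`. When this file is passed with `--alter`, only those differences would be applied:

```json
{
  "sample_db.sample_table1": {
    "transaction": true,
    "partition-key": [
      "c1"
    ],
    "clustering-key": [
      "c4"
    ],
    "columns": {
      "c1": "INT",
      "c2": "TEXT",
      "c3": "INT",
      "c4": "INT",
      "c5": "BOOLEAN",
      "c6": "TEXT"
    },
    "secondary-index": [
      "c4"
    ]
  }
}
```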
### Delete tables -For using config file (Sample config file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties)): +You can delete tables by using the properties file. To delete tables, run the following command, replacing the contents in the angle brackets as described: + ```console -$ java -jar scalardb-schema-loader-.jar --config -f schema.json [--coordinator] -D +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] -D ``` - - if `--coordinator` is specified, the coordinator tables will be deleted. - -For using CLI arguments fully for configuration (Deprecated. Please use the command using a config file instead): + +If `--coordinator` is specified, the Coordinator table will be deleted as well. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ ```console -# For Cosmos DB for NoSQL -$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f schema.json -D +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f -D ``` +
+
```console -# For DynamoDB -$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f schema.json -D +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f -D ``` +
+
```console -# For Cassandra -$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f schema.json -D +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f -D ``` +
+
```console -# For a JDBC database -$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f schema.json -D +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f -D ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
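For example, a concrete run that deletes the tables defined in a schema file, including the Coordinator table, might look like the following. The file names here are hypothetical:

```console
$ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties -f schema.json --coordinator -D
```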
### Repair tables -This command will repair the table metadata of existing tables. When using Cosmos DB for NoSQL, it additionally repairs stored procedure attached to each table. +You can repair the table metadata of existing tables by using the properties file. To repair table metadata of existing tables, run the following command, replacing the contents in the angle brackets as described: -For using config file (Sample config file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties)): ```console -$ java -jar scalardb-schema-loader-.jar --config -f schema.json [--coordinator] --repair-all +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] --repair-all ``` -- if `--coordinator` is specified, the coordinator tables will be repaired as well. -For using CLI arguments fully for configuration (Deprecated. Please use the command using a config file instead): +If `--coordinator` is specified, the Coordinator table will be repaired as well. In addition, if you're using Cosmos DB for NoSQL, running this command will also repair stored procedures attached to each table. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ ```console -# For Cosmos DB for NoSQL -$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f schema.json --repair-all +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f --repair-all ``` +
+
```console -# For DynamoDB -$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region [--no-backup] -f schema.json --repair-all +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f --repair-all ``` +
+
```console -# For Cassandra -$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f schema.json --repair-all +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region [--no-backup] -f --repair-all ``` +
+
```console -# For a JDBC database -$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f schema.json --repair-all +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f --repair-all ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
### Sample schema file -The sample schema is as follows (Sample schema file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/schema-loader/sample/schema_sample.json)): +The following is a sample schema. For a sample schema file, see [`schema_sample.json`](https://github.com/scalar-labs/scalardb/blob/master/schema-loader/sample/schema_sample.json). ```json { @@ -379,14 +506,17 @@ The sample schema is as follows (Sample schema file can be found [here](https:// ``` The schema has table definitions that include `columns`, `partition-key`, `clustering-key`, `secondary-index`, and `transaction` fields. -The `columns` field defines columns of the table and their data types. -The `partition-key` field defines which columns the partition key is composed of, and `clustering-key` defines which columns the clustering key is composed of. -The `secondary-index` field defines which columns are indexed. -The `transaction` field indicates whether the table is for transactions or not. -If you set the `transaction` field to `true` or don't specify the `transaction` field, this tool creates a table with transaction metadata if needed. -If not, it creates a table without any transaction metadata (that is, for a table with [Storage API](storage-abstraction.md)). - -You can also specify database/storage-specific options in the table definition as follows: + +- The `columns` field defines columns of the table and their data types. +- The `partition-key` field defines which columns the partition key is composed of. +- The `clustering-key` field defines which columns the clustering key is composed of. +- The `secondary-index` field defines which columns are indexed. +- The `transaction` field indicates whether the table is for transactions or not. + - If you set the `transaction` field to `true` or don't specify the `transaction` field, this tool creates a table with transaction metadata if needed. + - If you set the `transaction` field to `false`, this tool creates a table without any transaction metadata (that is, for a table with [Storage API](storage-abstraction.md)). + +You can also specify database or storage-specific options in the table definition as follows: + ```json { "sample_db.sample_table3": { @@ -404,30 +534,68 @@ You can also specify database/storage-specific options in the table definition a } ``` -The database/storage-specific options you can specify are as follows: +The database or storage-specific options you can specify are as follows: -For Cassandra: -- `compaction-strategy`, a compaction strategy. It should be `STCS` (SizeTieredCompaction), `LCS` (LeveledCompactionStrategy) or `TWCS` (TimeWindowCompactionStrategy). +
+
+ + + + +
-For DynamoDB and Cosmos DB for NoSQL: -- `ru`, a request unit. Please see [RU](#ru) for the details. +
-## Scaling Performance +The `compaction-strategy` option is the compaction strategy used. This option should be `STCS` (SizeTieredCompaction), `LCS` (LeveledCompactionStrategy), or `TWCS` (TimeWindowCompactionStrategy). +
+
-### RU +The `ru` option stands for Request Units. For details, see [RUs](#rus). +
+
-You can scale the throughput of Cosmos DB for NoSQL and DynamoDB by specifying `--ru` option (which applies to all the tables) or `ru` parameter for each table. The default values are `400` for Cosmos DB for NoSQL and `10` for DynamoDB respectively, which are set without `--ru` option. +The `ru` option stands for Request Units. For details, see [RUs](#rus). +
+
-Note that the schema loader abstracts [Request Unit](https://docs.microsoft.com/azure/cosmos-db/request-units) of Cosmos DB for NoSQL and [Capacity Unit](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) of DynamoDB with `RU`. -So, please set an appropriate value depending on the database implementations. Please also note that the schema loader sets the same value to both Read Capacity Unit and Write Capacity Unit for DynamoDB. +No options are available for JDBC databases. +
+
+ +## Scale for performance when using Cosmos DB for NoSQL or DynamoDB + +When using Cosmos DB for NoSQL or DynamoDB, you can scale by using Request Units (RUs) or auto-scaling. + +### RUs + +You can scale the throughput of Cosmos DB for NoSQL and DynamoDB by specifying the `--ru` option. When specifying this option, scaling applies to all tables or the `ru` parameter for each table. + +If the `--ru` option is not set, the default values will be `400` for Cosmos DB for NoSQL and `10` for DynamoDB. + +{% capture notice--info %} +**Note** + +- Schema Loader abstracts [Request Units](https://docs.microsoft.com/azure/cosmos-db/request-units) for Cosmos DB for NoSQL and [Capacity Units](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) for DynamoDB with `RU`. Therefore, be sure to set an appropriate value depending on the database implementation. +- Be aware that Schema Loader sets the same value to both read capacity unit and write capacity unit for DynamoDB. +{% endcapture %} + +
{{ notice--info | markdownify }}
</div>

### Auto-scaling

-By default, the schema loader enables auto-scaling of RU for all tables: RU is scaled in or out between 10% and 100% of a specified RU depending on a workload. For example, if you specify `-r 10000`, RU of each table is scaled in or out between 1000 and 10000. Note that auto-scaling of Cosmos DB for NoSQL is enabled only when you set more than or equal to 4000 RU.
+By default, Schema Loader enables auto-scaling of RUs for all tables: RUs scale between 10 percent and 100 percent of a specified RU, depending on the workload. For example, if you specify `-r 10000`, the RUs of each table auto-scale between `1000` and `10000`.

+{% capture notice--info %}
+**Note**

-## Data type mapping between ScalarDB and the other databases
+Auto-scaling for Cosmos DB for NoSQL is enabled only when this option is set to `4000` or more.
+{% endcapture %}

-Here are the supported data types in ScalarDB and their mapping to the data types of other databases.
+<div class="notice--info">
{{ notice--info | markdownify }}
+ +## Data-type mapping between ScalarDB and other databases + +The following table shows the supported data types in ScalarDB and their mapping to the data types of other databases. | ScalarDB | Cassandra | Cosmos DB for NoSQL | DynamoDB | MySQL | PostgreSQL | Oracle | SQL Server | SQLite | |-----------|-----------|---------------------|----------|----------|------------------|----------------|-----------------|---------| @@ -439,48 +607,47 @@ Here are the supported data types in ScalarDB and their mapping to the data type | TEXT | text | string (JSON) | S | longtext | text | varchar2(4000) | varchar(8000) | text | | BLOB | blob | string (JSON) | B | longblob | bytea | RAW(2000) | varbinary(8000) | blob | -However, the following types in JDBC databases are converted differently when they are used as a primary key or a secondary index key due to the limitations of RDB data types. +However, the following data types in JDBC databases are converted differently when they are used as a primary key or a secondary index key. This is due to the limitations of RDB data types. | ScalarDB | MySQL | PostgreSQL | Oracle | |----------|---------------|-------------------|--------------| | TEXT | VARCHAR(64) | VARCHAR(10485760) | VARCHAR2(64) | | BLOB | VARBINARY(64) | | RAW(64) | -The value range of `BIGINT` in ScalarDB is from -2^53 to 2^53 regardless of the underlying database. +The value range of `BIGINT` in ScalarDB is from -2^53 to 2^53, regardless of the underlying database. -If this data type mapping doesn't match your application, please alter the tables to change the data types after creating them with this tool. +If this data-type mapping doesn't match your application, please alter the tables to change the data types after creating them by using this tool. ## Internal metadata for Consensus Commit -The Consensus Commit transaction manager manages metadata (e.g., transaction ID, record version, transaction status) stored along with the actual records to handle transactions properly. -Thus, along with any required columns by the application, additional columns for the metadata need to be defined in the schema. -Additionaly, this tool creates a table with the metadata when you use the Consensus Commit transaction manager. +The Consensus Commit transaction manager manages metadata (for example, transaction ID, record version, and transaction status) stored along with the actual records to handle transactions properly. + +Thus, along with any columns that the application requires, additional columns for the metadata need to be defined in the schema. Additionally, this tool creates a table with the metadata if you use the Consensus Commit transaction manager. + +## Use Schema Loader in your application + +You can check the version of Schema Loader from the [Maven Central Repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb-schema-loader). For example in Gradle, you can add the following dependency to your `build.gradle` file, replacing `` with the version of Schema Loader that you want to use: -## Using Schema Loader in your program -You can check the version of `schema-loader` from [maven central repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb-schema-loader). -For example in Gradle, you can add the following dependency to your build.gradle. Please replace the `` with the version you want to use. 
```gradle
dependencies {
  implementation 'com.scalar-labs:scalardb-schema-loader:<VERSION>'
}
```

### Create, alter, repair, or delete tables

You can create, alter, delete, or repair tables that are defined in the schema by using Schema Loader. To do this, you can pass a ScalarDB properties file, schema, and additional options, if needed, as shown below:

```java
public class SchemaLoaderSample {
  public static int main(String... args) throws SchemaLoaderException {
    Path configFilePath = Paths.get("database.properties");
    // "sample_schema.json" and "altered_sample_schema.json" can be found in the "/sample" directory.
    Path schemaFilePath = Paths.get("sample_schema.json");
    Path alteredSchemaFilePath = Paths.get("altered_sample_schema.json");
    boolean createCoordinatorTables = true; // whether to create the Coordinator table or not
    boolean deleteCoordinatorTables = true; // whether to delete the Coordinator table or not
    boolean repairCoordinatorTables = true; // whether to repair the Coordinator table or not

    Map<String, String> tableCreationOptions = new HashMap<>();

@@ -499,16 +666,16 @@ public class SchemaLoaderSample {
    Map<String, String> tableReparationOptions = new HashMap<>();
    tableReparationOptions.put(DynamoAdmin.NO_BACKUP, "true");

    // Create tables.
    SchemaLoader.load(configFilePath, schemaFilePath, tableCreationOptions, createCoordinatorTables);

    // Alter tables.
    SchemaLoader.alterTables(configFilePath, alteredSchemaFilePath, indexCreationOptions);

    // Repair tables.
    SchemaLoader.repairTables(configFilePath, schemaFilePath, tableReparationOptions, repairCoordinatorTables);

    // Delete tables.
    SchemaLoader.unload(configFilePath, schemaFilePath, deleteCoordinatorTables);

    return 0;
  }
}
```

You can also create, delete, or repair a schema by passing a serialized-schema JSON string (the raw text of a schema file) as shown below:

```java
// Create tables.
SchemaLoader.load(configFilePath, serializedSchemaJson, tableCreationOptions, createCoordinatorTables);

// Alter tables.
SchemaLoader.alterTables(configFilePath, serializedAlteredSchemaFilePath, indexCreationOptions);

// Repair tables.
SchemaLoader.repairTables(configFilePath, serializedSchemaJson, tableReparationOptions, repairCoordinatorTables);

// Delete tables.
SchemaLoader.unload(configFilePath, serializedSchemaJson, deleteCoordinatorTables);
```

When configuring ScalarDB, you can use a `Properties` object as well, as shown below:

```java
// Create tables.
SchemaLoader.load(properties, serializedSchemaJson, tableCreationOptions, createCoordinatorTables);

// Alter tables.
SchemaLoader.alterTables(properties, serializedAlteredSchemaFilePath, indexCreationOptions);

// Repair tables.
SchemaLoader.repairTables(properties, serializedSchemaJson, tableReparationOptions, repairCoordinatorTables);

// Delete tables.
SchemaLoader.unload(properties, serializedSchemaJson, deleteCoordinatorTables);
```
diff --git a/docs/3.5/schema-loader.md b/docs/3.5/schema-loader.md
new file mode 100644
index 00000000..10b321a4
--- /dev/null
+++ b/docs/3.5/schema-loader.md
@@ -0,0 +1,541 @@
# ScalarDB Schema Loader

ScalarDB has its own data model and schema that maps to the implementation-specific data model and schema. In addition, ScalarDB stores internal metadata, such as transaction IDs, record versions, and transaction statuses, to manage transaction logs and statuses when you use the Consensus Commit transaction manager.

Since managing the schema mapping and metadata for transactions can be difficult, you can use ScalarDB Schema Loader, which is a tool to create schemas that doesn't require in-depth knowledge about schema mapping or metadata.

You have two options to specify general CLI options in Schema Loader:

- Pass the ScalarDB properties file and database-specific or storage-specific options.
- Pass database-specific or storage-specific options without the ScalarDB properties file. (Deprecated)

{% capture notice--info %}
**Note**

This tool supports only basic options to create, delete, or repair a table. If you want to use the advanced features of a database, you must alter your tables with a database-specific tool after creating the tables with this tool.
{% endcapture %}

<div class="notice--info">
{{ notice--info | markdownify }}
+ +## Set up Schema Loader + +Select your preferred method to set up Schema Loader, and follow the instructions. + +
+
+ + +
+ +
+ +You can download the release versions of Schema Loader from the [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases) page. +
+
+ +You can pull the Docker image from the [Scalar container registry](https://github.com/orgs/scalar-labs/packages/container/package/scalardb-schema-loader) by running the following command, replacing the contents in the angle brackets as described: + +```console +$ docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader: +``` + +{% capture notice--info %} +**Note** + +You can specify the same command arguments even if you use the fat JAR or the container. In the [Available commands](#available-commands) section, the JAR is used, but you can run the commands by using the container in the same way by replacing `java -jar scalardb-schema-loader-.jar` with `docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader:`. +{% endcapture %} + +
{{ notice--info | markdownify }}
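For example, a hypothetical run that creates the tables defined in a mounted schema file might look like the following, assuming the image's entrypoint takes the same arguments as the JAR:

```console
$ docker run --rm -v /path/to/database.properties:/database.properties -v /path/to/schema.json:/schema.json ghcr.io/scalar-labs/scalardb-schema-loader:<VERSION> --config /database.properties -f /schema.json
```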
+
+
+ +## Run Schema Loader + +This section explains how to run Schema Loader. + +### Available commands + +Select how you would like to configure Schema Loader for your database. The preferred method is to use the properties file since other, database-specific methods are deprecated. + +The following commands are available when using the properties file: + +```console +Usage: java -jar scalardb-schema-loader-.jar [-D] [--coordinator] + [--no-backup] [--no-scaling] -c= + [--compaction-strategy=] [-f=] + [--replication-factor=] + [--replication-strategy=] [--ru=] +Create/Delete schemas in the storage defined in the config file + -c, --config= + Path to the config file of Scalar DB + --compaction-strategy= + The compaction strategy, must be LCS, STCS or TWCS + (supported in Cassandra) + --coordinator Create/delete coordinator table + -D, --delete-all Delete tables + -f, --schema-file= + Path to the schema json file + --no-backup Disable continuous backup (supported in DynamoDB) + --no-scaling Disable auto-scaling (supported in DynamoDB, Cosmos DB) + --replication-factor= + The replication factor (supported in Cassandra) + --replication-strategy= + The replication strategy, must be SimpleStrategy or + NetworkTopologyStrategy (supported in Cassandra) + --ru= Base resource unit (supported in DynamoDB, Cosmos DB) +``` + +For a sample properties file, see [`database.properties`](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties). + +{% capture notice--info %} +**Note** + +The following database-specific methods have been deprecated. Please use the [commands for configuring the properties file](#available-commands) instead. + +
+
+ + + + +
+ +
+ +```console +Usage: java -jar scalardb-schema-loader-.jar --cassandra [-D] + [-c=] -f= -h= + [-n=] [-p=] [-P=] + [-R=] [-u=] +Create/Delete Cassandra schemas + -c, --compaction-strategy= + Cassandra compaction strategy, must be LCS, STCS or TWCS + -D, --delete-all Delete tables + -f, --schema-file= + Path to the schema json file + -h, --host= Cassandra host IP + -n, --network-strategy= + Cassandra network strategy, must be SimpleStrategy or + NetworkTopologyStrategy + -p, --password= + Cassandra password + -P, --port= Cassandra Port + -R, --replication-factor= + Cassandra replication factor + -u, --user= Cassandra user +``` +
+
+ +```console +Usage: java -jar scalardb-schema-loader-.jar --cosmos [-D] + [--no-scaling] -f= -h= -p= [-r=] +Create/Delete Cosmos DB schemas + -D, --delete-all Delete tables + -f, --schema-file= + Path to the schema json file + -h, --host= Cosmos DB account URI + --no-scaling Disable auto-scaling for Cosmos DB + -p, --password= Cosmos DB key + -r, --ru= Base resource unit +``` +
+
+ +```console +Usage: java -jar scalardb-schema-loader-.jar --dynamo [-D] + [--no-backup] [--no-scaling] [--endpoint-override=] + -f= -p= [-r=] --region= + -u= +Create/Delete DynamoDB schemas + -D, --delete-all Delete tables + --endpoint-override= + Endpoint with which the DynamoDB SDK should + communicate + -f, --schema-file= + Path to the schema json file + --no-backup Disable continuous backup for DynamoDB + --no-scaling Disable auto-scaling for DynamoDB + -p, --password= AWS access secret key + -r, --ru= Base resource unit + --region= AWS region + -u, --user= AWS access key ID +``` +
+
+ +```console +Usage: java -jar scalardb-schema-loader-.jar --jdbc [-D] + -f= -j= -p= -u= +Create/Delete JDBC schemas + -D, --delete-all Delete tables + -f, --schema-file= + Path to the schema json file + -j, --jdbc-url= JDBC URL + -p, --password= + JDBC password + -u, --user= JDBC user +``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
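For reference, a minimal ScalarDB properties file for a local Cassandra cluster might look like the following sketch. The values here are hypothetical placeholders, so adjust them for your environment:

```properties
scalar.db.storage=cassandra
scalar.db.contact_points=localhost
scalar.db.username=cassandra
scalar.db.password=cassandra
```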
+ +### Create namespaces and tables + +To create namespaces and tables by using a properties file, run the following command, replacing the contents in the angle brackets as described: + +```console +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] +``` + +If `--coordinator` is specified, a [Coordinator table](api-guide.md#specify-operations-for-the-coordinator-table) will be created. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ +```console +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f [-n ] [-R ] +``` + +- If `-P ` is not supplied, it defaults to `9042`. +- If `-u ` is not supplied, it defaults to `cassandra`. +- If `-p ` is not supplied, it defaults to `cassandra`. +- `` should be `SimpleStrategy` or `NetworkTopologyStrategy` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f [-r BASE_RESOURCE_UNIT] +``` + +- `` you can use a primary key or a secondary key. +- `-r BASE_RESOURCE_UNIT` is an option. You can specify the RU of each database. The maximum RU in tables in the database will be set. If you don't specify RU of tables, the database RU will be set with this option. By default, it's 400. +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f [-r BASE_RESOURCE_UNIT] +``` + +- `` should be a string to specify an AWS region like `ap-northeast-1`. +- `-r` option is almost the same as Cosmos DB for NoSQL option. However, the unit means DynamoDB capacity unit. The read and write capacity units are set the same value. +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f +``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
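For example, a concrete run that creates the tables defined in `schema.json`, along with the Coordinator table, might look like the following. The file names here are hypothetical:

```console
$ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties -f schema.json --coordinator
```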
+ +### Delete tables + +You can delete tables by using the properties file. To delete tables, run the following command, replacing the contents in the angle brackets as described: + +```console +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] -D +``` + +If `--coordinator` is specified, the Coordinator table will be deleted as well. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ +```console +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f -D +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f -D +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f -D +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f -D +``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
+ +### Sample schema file + +The following is a sample schema. For a sample schema file, see [`schema_sample.json`](https://github.com/scalar-labs/scalardb/blob/master/schema-loader/sample/schema_sample.json). + +```json +{ + "sample_db.sample_table": { + "transaction": false, + "partition-key": [ + "c1" + ], + "clustering-key": [ + "c4 ASC", + "c6 DESC" + ], + "columns": { + "c1": "INT", + "c2": "TEXT", + "c3": "BLOB", + "c4": "INT", + "c5": "BOOLEAN", + "c6": "INT" + }, + "secondary-index": [ + "c2", + "c4" + ] + }, + + "sample_db.sample_table1": { + "transaction": true, + "partition-key": [ + "c1" + ], + "clustering-key": [ + "c4" + ], + "columns": { + "c1": "INT", + "c2": "TEXT", + "c3": "INT", + "c4": "INT", + "c5": "BOOLEAN" + } + }, + + "sample_db.sample_table2": { + "transaction": false, + "partition-key": [ + "c1" + ], + "clustering-key": [ + "c4", + "c3" + ], + "columns": { + "c1": "INT", + "c2": "TEXT", + "c3": "INT", + "c4": "INT", + "c5": "BOOLEAN" + } + } +} +``` + +The schema has table definitions that include `columns`, `partition-key`, `clustering-key`, `secondary-index`, and `transaction` fields. + +- The `columns` field defines columns of the table and their data types. +- The `partition-key` field defines which columns the partition key is composed of. +- The `clustering-key` field defines which columns the clustering key is composed of. +- The `secondary-index` field defines which columns are indexed. +- The `transaction` field indicates whether the table is for transactions or not. + - If you set the `transaction` field to `true` or don't specify the `transaction` field, this tool creates a table with transaction metadata if needed. + - If you set the `transaction` field to `false`, this tool creates a table without any transaction metadata (that is, for a table with [Storage API](storage-abstraction.md)). + +You can also specify database or storage-specific options in the table definition as follows: + +```json +{ + "sample_db.sample_table3": { + "partition-key": [ + "c1" + ], + "columns": { + "c1": "INT", + "c2": "TEXT", + "c3": "BLOB" + }, + "compaction-strategy": "LCS", + "ru": 5000 + } +} +``` + +The database or storage-specific options you can specify are as follows: + +
+
+ + + + +
+ +
+ +The `compaction-strategy` option is the compaction strategy used. This option should be `STCS` (SizeTieredCompaction), `LCS` (LeveledCompactionStrategy), or `TWCS` (TimeWindowCompactionStrategy). +
+
+ +The `ru` option stands for Request Units. For details, see [RUs](#rus). +
+
+ +The `ru` option stands for Request Units. For details, see [RUs](#rus). +
+
+ +No options are available for JDBC databases. +
+
+ +## Scale for performance when using Cosmos DB for NoSQL or DynamoDB + +When using Cosmos DB for NoSQL or DynamoDB, you can scale by using Request Units (RUs) or auto-scaling. + +### RUs + +You can scale the throughput of Cosmos DB for NoSQL and DynamoDB by specifying the `--ru` option. When specifying this option, scaling applies to all tables or the `ru` parameter for each table. + +If the `--ru` option is not set, the default values will be `400` for Cosmos DB for NoSQL and `10` for DynamoDB. + +{% capture notice--info %} +**Note** + +- Schema Loader abstracts [Request Units](https://docs.microsoft.com/azure/cosmos-db/request-units) for Cosmos DB for NoSQL and [Capacity Units](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) for DynamoDB with `RU`. Therefore, be sure to set an appropriate value depending on the database implementation. +- Be aware that Schema Loader sets the same value to both read capacity unit and write capacity unit for DynamoDB. +{% endcapture %} + +
{{ notice--info | markdownify }}
</div>

### Auto-scaling

By default, Schema Loader enables auto-scaling of RUs for all tables: RUs scale between 10 percent and 100 percent of a specified RU, depending on the workload. For example, if you specify `-r 10000`, the RUs of each table auto-scale between `1000` and `10000`.

{% capture notice--info %}
**Note**

Auto-scaling for Cosmos DB for NoSQL is enabled only when this option is set to `4000` or more.
{% endcapture %}

<div class="notice--info">
{{ notice--info | markdownify }}
+
+## Data-type mapping between ScalarDB and other databases
+
+The following table shows the supported data types in ScalarDB and their mapping to the data types of other databases.
+
+| ScalarDB | MySQL | PostgreSQL | Oracle | SQL Server |
+|-----------|----------|------------------|----------------|-----------------|
+| BOOLEAN | boolean | boolean | number(1) | bit |
+| INT | int | int | int | int |
+| BIGINT | bigint | bigint | number(19) | bigint |
+| FLOAT | double | float | binary_float | float(24) |
+| DOUBLE | double | double precision | binary_double | float |
+| TEXT | longtext | text | varchar2(4000) | varchar(8000) |
+| BLOB | longblob | bytea | RAW(2000) | varbinary(8000) |
+
+However, the following data types in JDBC databases are converted differently when they are used as a primary key or a secondary index key. This is due to the limitations of RDB data types.
+
+| ScalarDB | MySQL | PostgreSQL | Oracle |
+|----------|---------------|-------------------|--------------|
+| TEXT | VARCHAR(64) | VARCHAR(10485760) | VARCHAR2(64) |
+| BLOB | VARBINARY(64) | | RAW(64) |
+
+The value range of `BIGINT` in ScalarDB is from -2^53 to 2^53, regardless of the underlying database.
+
+If this data-type mapping doesn't match your application, please alter the tables to change the data types after creating them by using this tool.
+
+## Use Schema Loader in your application
+
+You can check the version of Schema Loader from the [Maven Central Repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb-schema-loader). For example, in Gradle, you can add the following dependency to your `build.gradle` file, replacing `<VERSION>` with the version of Schema Loader that you want to use:
+
+```gradle
+dependencies {
+    implementation group: 'com.scalar-labs', name: 'scalardb-schema-loader', version: '<VERSION>'
+}
+```
+
+### Create or delete tables
+
+You can create or delete tables that are defined in the schema by using Schema Loader. To do this, you can pass a ScalarDB properties file, schema, and additional options, if needed, as shown below:
+
+```java
+public class SchemaLoaderSample {
+  public static int main(String... args) throws SchemaLoaderException {
+    Path configFilePath = Paths.get("database.properties");
+    Path schemaFilePath = Paths.get("sample_schema.json");
+    boolean createCoordinatorTable = true; // whether to create the Coordinator table or not
+    boolean deleteCoordinatorTable = true; // whether to delete the Coordinator table or not
+
+    Map<String, String> options = new HashMap<>();
+
+    options.put(
+        CassandraAdmin.REPLICATION_STRATEGY, ReplicationStrategy.SIMPLE_STRATEGY.toString());
+    options.put(CassandraAdmin.COMPACTION_STRATEGY, CompactionStrategy.LCS.toString());
+    options.put(CassandraAdmin.REPLICATION_FACTOR, "1");
+
+    options.put(DynamoAdmin.REQUEST_UNIT, "1");
+    options.put(DynamoAdmin.NO_SCALING, "true");
+    options.put(DynamoAdmin.NO_BACKUP, "true");
+
+    // Create tables
+    SchemaLoader.load(configFilePath, schemaFilePath, options, createCoordinatorTable);
+
+    // Delete tables
+    SchemaLoader.unload(configFilePath, schemaFilePath, deleteCoordinatorTable);
+
+    return 0;
+  }
+}
+```
+
+You can also create or delete a schema by passing a serialized-schema JSON string (the raw text of a schema file) as shown below:
+
+```java
+// Create tables
+SchemaLoader.load(configFilePath, serializedSchemaJson, options, createCoordinatorTable);
+
+// Delete tables
+SchemaLoader.unload(configFilePath, serializedSchemaJson, deleteCoordinatorTable);
+```
+
+When configuring ScalarDB, you can use a `Properties` object as well, as shown below:
+
+```java
+// Create tables
+SchemaLoader.load(properties, serializedSchemaJson, options, createCoordinatorTable);
+
+// Delete tables
+SchemaLoader.unload(properties, serializedSchemaJson, deleteCoordinatorTable);
+```
diff --git a/docs/3.6/schema-loader.md b/docs/3.6/schema-loader.md
new file mode 100644
index 00000000..570fbf78
--- /dev/null
+++ b/docs/3.6/schema-loader.md
@@ -0,0 +1,615 @@
+# ScalarDB Schema Loader
+
+ScalarDB has its own data model and schema that maps to the implementation-specific data model and schema. In addition, ScalarDB stores internal metadata, such as transaction IDs, record versions, and transaction statuses, to manage transaction logs and statuses when you use the Consensus Commit transaction manager.
+
+Since managing the schema mapping and metadata for transactions can be difficult, you can use ScalarDB Schema Loader, which is a tool to create schemas that doesn't require you to have in-depth knowledge about schema mapping or metadata.
+
+You have two options to specify general CLI options in Schema Loader:
+
+- Pass the ScalarDB properties file and database-specific or storage-specific options.
+- Pass database-specific or storage-specific options without the ScalarDB properties file. (Deprecated)
+
+{% capture notice--info %}
+**Note**
+
+This tool supports only basic options to create, delete, or repair a table. If you want to use the advanced features of a database, you must alter your tables with a database-specific tool after creating the tables with this tool.
+{% endcapture %}
+
{{ notice--info | markdownify }}
+ +## Set up Schema Loader + +Select your preferred method to set up Schema Loader, and follow the instructions. + +
+
+ + +
+ +
+ +You can download the release versions of Schema Loader from the [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases) page. +
+
+
+You can pull the Docker image from the [Scalar container registry](https://github.com/orgs/scalar-labs/packages/container/package/scalardb-schema-loader) by running the following command, replacing the contents in the angle brackets as described:
+
+```console
+$ docker run --rm -v <PATH_TO_YOUR_LOCAL_SCHEMA_FILE>:<PATH_TO_SCHEMA_FILE_IN_CONTAINER> [-v <PATH_TO_YOUR_LOCAL_PROPERTIES_FILE>:<PATH_TO_PROPERTIES_FILE_IN_CONTAINER>] ghcr.io/scalar-labs/scalardb-schema-loader:<VERSION>
+```
+
+{% capture notice--info %}
+**Note**
+
+You can specify the same command arguments even if you use the fat JAR or the container. In the [Available commands](#available-commands) section, the JAR is used, but you can run the commands by using the container in the same way by replacing `java -jar scalardb-schema-loader-<VERSION>.jar` with `docker run --rm -v <PATH_TO_YOUR_LOCAL_SCHEMA_FILE>:<PATH_TO_SCHEMA_FILE_IN_CONTAINER> [-v <PATH_TO_YOUR_LOCAL_PROPERTIES_FILE>:<PATH_TO_PROPERTIES_FILE_IN_CONTAINER>] ghcr.io/scalar-labs/scalardb-schema-loader:<VERSION>`.
+{% endcapture %}
+
{{ notice--info | markdownify }}
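+
+As a concrete sketch, the following command mounts a local schema file and properties file into the container and creates the tables they define. The file names, mount paths, and image tag are assumptions; replace them with your own:
+
+```console
+# Hypothetical paths and image tag; adjust them to your environment.
+$ docker run --rm -v $PWD/schema.json:/schema.json -v $PWD/database.properties:/database.properties ghcr.io/scalar-labs/scalardb-schema-loader:3.6.0 --config /database.properties -f /schema.json
+```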
+
+
+ +## Run Schema Loader + +This section explains how to run Schema Loader. + +### Available commands + +Select how you would like to configure Schema Loader for your database. The preferred method is to use the properties file since other, database-specific methods are deprecated. + +The following commands are available when using the properties file: + +```console +Usage: java -jar scalardb-schema-loader-.jar [-D] [--coordinator] + [--no-backup] [--no-scaling] -c= + [--compaction-strategy=] [-f=] + [--replication-factor=] + [--replication-strategy=] [--ru=] +Create/Delete schemas in the storage defined in the config file + -c, --config= + Path to the config file of ScalarDB + --compaction-strategy= + The compaction strategy, must be LCS, STCS or TWCS + (supported in Cassandra) + --coordinator Create/delete coordinator tables + -D, --delete-all Delete tables + -f, --schema-file= + Path to the schema json file + --no-backup Disable continuous backup (supported in DynamoDB) + --no-scaling Disable auto-scaling (supported in DynamoDB, Cosmos DB) + --repair-all Repair tables : it repairs the table metadata of + existing tables. When using Cosmos DB, it + additionally repairs stored procedure attached + to each table + --replication-factor= + The replication factor (supported in Cassandra) + --replication-strategy= + The replication strategy, must be SimpleStrategy or + NetworkTopologyStrategy (supported in Cassandra) + --ru= Base resource unit (supported in DynamoDB, Cosmos DB) +``` + +For a sample properties file, see [`database.properties`](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties). + +{% capture notice--info %} +**Note** + +The following database-specific methods have been deprecated. Please use the [commands for configuring the properties file](#available-commands) instead. + +
+
+ + + + +
+ +
+ +```console +Usage: java -jar scalardb-schema-loader-.jar --cassandra [-D] + [-c=] -f= -h= + [-n=] [-p=] [-P=] + [-R=] [-u=] +Create/Delete Cassandra schemas + -c, --compaction-strategy= + Cassandra compaction strategy, must be LCS, STCS or TWCS + -D, --delete-all Delete tables + -f, --schema-file= + Path to the schema json file + -h, --host= Cassandra host IP + -n, --network-strategy= + Cassandra network strategy, must be SimpleStrategy or + NetworkTopologyStrategy + -p, --password= + Cassandra password + -P, --port= Cassandra Port + -R, --replication-factor= + Cassandra replication factor + --repair-all Repair tables : it repairs the table metadata of + existing tables + -u, --user= Cassandra user +``` +
+
+ +```console +Usage: java -jar scalardb-schema-loader-.jar --cosmos [-D] + [--no-scaling] -f= -h= -p= [-r=] +Create/Delete Cosmos DB schemas + -D, --delete-all Delete tables + -f, --schema-file= + Path to the schema json file + -h, --host= Cosmos DB account URI + --no-scaling Disable auto-scaling for Cosmos DB + -p, --password= Cosmos DB key + -r, --ru= Base resource unit + --repair-all Repair tables : it repairs the table metadata of + existing tables and repairs stored procedure + attached to each table +``` +
+
+ +```console +Usage: java -jar scalardb-schema-loader-.jar --dynamo [-D] + [--no-backup] [--no-scaling] [--endpoint-override=] + -f= -p= [-r=] --region= + -u= +Create/Delete DynamoDB schemas + -D, --delete-all Delete tables + --endpoint-override= + Endpoint with which the DynamoDB SDK should + communicate + -f, --schema-file= + Path to the schema json file + --no-backup Disable continuous backup for DynamoDB + --no-scaling Disable auto-scaling for DynamoDB + -p, --password= AWS access secret key + -r, --ru= Base resource unit + --region= AWS region + --repair-all Repair tables : it repairs the table metadata of + existing tables + -u, --user= AWS access key ID +``` +
+
+ +```console +Usage: java -jar scalardb-schema-loader-.jar --jdbc [-D] + -f= -j= -p= -u= +Create/Delete JDBC schemas + -D, --delete-all Delete tables + -f, --schema-file= + Path to the schema json file + -j, --jdbc-url= JDBC URL + -p, --password= + JDBC password + --repair-all Repair tables : it repairs the table metadata of + existing tables + -u, --user= JDBC user +``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
+ +### Create namespaces and tables + +To create namespaces and tables by using a properties file, run the following command, replacing the contents in the angle brackets as described: + +```console +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] +``` + +If `--coordinator` is specified, a [Coordinator table](api-guide.md#specify-operations-for-the-coordinator-table) will be created. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+
+```console
+$ java -jar scalardb-schema-loader-<VERSION>.jar --cassandra -h <CASSANDRA_IP> [-P <PORT>] [-u <USER>] [-p <PASSWORD>] -f <PATH_TO_SCHEMA_FILE> [-n <NETWORK_STRATEGY>] [-R <REPLICATION_FACTOR>]
+```
+
+- If `-P <PORT>` is not supplied, it defaults to `9042`.
+- If `-u <USER>` is not supplied, it defaults to `cassandra`.
+- If `-p <PASSWORD>` is not supplied, it defaults to `cassandra`.
+- `<NETWORK_STRATEGY>` should be `SimpleStrategy` or `NetworkTopologyStrategy`.
+
+
+```console
+$ java -jar scalardb-schema-loader-<VERSION>.jar --cosmos -h <COSMOS_DB_FOR_NOSQL_ACCOUNT_URI> -p <COSMOS_DB_FOR_NOSQL_KEY> -f <PATH_TO_SCHEMA_FILE> [-r BASE_RESOURCE_UNIT]
+```
+
+- For `<COSMOS_DB_FOR_NOSQL_KEY>`, you can use a primary key or a secondary key.
+- `-r BASE_RESOURCE_UNIT` is optional. You can specify the RU of each database. The maximum RU among the tables in the database will be set. If you don't specify the RU for tables, the database RU will be set by using this option. By default, the value is `400`.
+
+
+```console
+$ java -jar scalardb-schema-loader-<VERSION>.jar --dynamo -u <AWS_ACCESS_KEY_ID> -p <AWS_ACCESS_SECRET_KEY> --region <REGION> -f <PATH_TO_SCHEMA_FILE> [-r BASE_RESOURCE_UNIT]
+```
+
+- `<REGION>` should be a string that specifies an AWS region, like `ap-northeast-1`.
+- The `-r` option is almost the same as the Cosmos DB for NoSQL option. However, in this case, the unit means DynamoDB capacity units. The read and write capacity units are set to the same value.
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f +``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
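+
+For example, assuming a properties file named `database.properties` and a schema file named `schema.json` (placeholder names) in the current directory, the following command creates the defined tables along with the Coordinator table:
+
+```console
+# Hypothetical file names and version; adjust them to your environment.
+$ java -jar scalardb-schema-loader-3.6.0.jar --config database.properties -f schema.json --coordinator
+```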
+ +### Delete tables + +You can delete tables by using the properties file. To delete tables, run the following command, replacing the contents in the angle brackets as described: + +```console +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] -D +``` + +If `--coordinator` is specified, the Coordinator table will be deleted as well. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ +```console +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f -D +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f -D +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f -D +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f -D +``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
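+
+For example, the following command deletes the tables defined in `schema.json`, as well as the Coordinator table. As before, the file names and version number are placeholders:
+
+```console
+# Hypothetical file names and version; adjust them to your environment.
+$ java -jar scalardb-schema-loader-3.6.0.jar --config database.properties -f schema.json --coordinator -D
+```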
+ +### Repair tables + +You can repair the table metadata of existing tables by using the properties file. To repair table metadata of existing tables, run the following command, replacing the contents in the angle brackets as described: + +```console +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] --repair-all +``` + +If `--coordinator` is specified, the Coordinator table will be repaired as well. In addition, if you're using Cosmos DB for NoSQL, running this command will also repair stored procedures attached to each table. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ +```console +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f --repair-all +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f --repair-all +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region [--no-backup] -f --repair-all +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f --repair-all +``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
+ +### Sample schema file + +The following is a sample schema. For a sample schema file, see [`schema_sample.json`](https://github.com/scalar-labs/scalardb/blob/master/schema-loader/sample/schema_sample.json). + +```json +{ + "sample_db.sample_table": { + "transaction": false, + "partition-key": [ + "c1" + ], + "clustering-key": [ + "c4 ASC", + "c6 DESC" + ], + "columns": { + "c1": "INT", + "c2": "TEXT", + "c3": "BLOB", + "c4": "INT", + "c5": "BOOLEAN", + "c6": "INT" + }, + "secondary-index": [ + "c2", + "c4" + ] + }, + + "sample_db.sample_table1": { + "transaction": true, + "partition-key": [ + "c1" + ], + "clustering-key": [ + "c4" + ], + "columns": { + "c1": "INT", + "c2": "TEXT", + "c3": "INT", + "c4": "INT", + "c5": "BOOLEAN" + } + }, + + "sample_db.sample_table2": { + "transaction": false, + "partition-key": [ + "c1" + ], + "clustering-key": [ + "c4", + "c3" + ], + "columns": { + "c1": "INT", + "c2": "TEXT", + "c3": "INT", + "c4": "INT", + "c5": "BOOLEAN" + } + } +} +``` + +The schema has table definitions that include `columns`, `partition-key`, `clustering-key`, `secondary-index`, and `transaction` fields. + +- The `columns` field defines columns of the table and their data types. +- The `partition-key` field defines which columns the partition key is composed of. +- The `clustering-key` field defines which columns the clustering key is composed of. +- The `secondary-index` field defines which columns are indexed. +- The `transaction` field indicates whether the table is for transactions or not. + - If you set the `transaction` field to `true` or don't specify the `transaction` field, this tool creates a table with transaction metadata if needed. + - If you set the `transaction` field to `false`, this tool creates a table without any transaction metadata (that is, for a table with [Storage API](storage-abstraction.md)). + +You can also specify database or storage-specific options in the table definition as follows: + +```json +{ + "sample_db.sample_table3": { + "partition-key": [ + "c1" + ], + "columns": { + "c1": "INT", + "c2": "TEXT", + "c3": "BLOB" + }, + "compaction-strategy": "LCS", + "ru": 5000 + } +} +``` + +The database or storage-specific options you can specify are as follows: + +
+
+ + + + +
+ +
+
+The `compaction-strategy` option specifies the compaction strategy to use. This option should be `STCS` (SizeTieredCompactionStrategy), `LCS` (LeveledCompactionStrategy), or `TWCS` (TimeWindowCompactionStrategy).
+
+ +The `ru` option stands for Request Units. For details, see [RUs](#rus). +
+
+ +The `ru` option stands for Request Units. For details, see [RUs](#rus). +
+
+ +No options are available for JDBC databases. +
+
+
+## Scale for performance when using Cosmos DB for NoSQL or DynamoDB
+
+When using Cosmos DB for NoSQL or DynamoDB, you can scale by using Request Units (RUs) or auto-scaling.
+
+### RUs
+
+You can scale the throughput of Cosmos DB for NoSQL and DynamoDB by specifying the `--ru` option, which applies to all tables. Alternatively, you can set the `ru` parameter for each table in the schema file.
+
+If the `--ru` option is not set, the default values will be `400` for Cosmos DB for NoSQL and `10` for DynamoDB.
+
+{% capture notice--info %}
+**Note**
+
+- Schema Loader abstracts [Request Units](https://docs.microsoft.com/azure/cosmos-db/request-units) for Cosmos DB for NoSQL and [Capacity Units](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) for DynamoDB with `RU`. Therefore, be sure to set an appropriate value depending on the database implementation.
+- Be aware that Schema Loader sets the same value for both the read capacity unit and the write capacity unit in DynamoDB.
+{% endcapture %}
+
{{ notice--info | markdownify }}
+
+### Auto-scaling
+
+By default, Schema Loader enables auto-scaling of RUs for all tables: RUs scale between 10 percent and 100 percent of the specified RU value, depending on the workload. For example, if you specify `-r 10000`, the RUs of each table auto-scale between `1000` and `10000`.
+
+{% capture notice--info %}
+**Note**
+
+Auto-scaling for Cosmos DB for NoSQL is enabled only when this option is set to `4000` or more.
+{% endcapture %}
+
{{ notice--info | markdownify }}
+
+## Data-type mapping between ScalarDB and other databases
+
+The following table shows the supported data types in ScalarDB and their mapping to the data types of other databases.
+
+| ScalarDB | MySQL | PostgreSQL | Oracle | SQL Server |
+|-----------|----------|------------------|----------------|-----------------|
+| BOOLEAN | boolean | boolean | number(1) | bit |
+| INT | int | int | int | int |
+| BIGINT | bigint | bigint | number(19) | bigint |
+| FLOAT | double | float | binary_float | float(24) |
+| DOUBLE | double | double precision | binary_double | float |
+| TEXT | longtext | text | varchar2(4000) | varchar(8000) |
+| BLOB | longblob | bytea | RAW(2000) | varbinary(8000) |
+
+However, the following data types in JDBC databases are converted differently when they are used as a primary key or a secondary index key. This is due to the limitations of RDB data types.
+
+| ScalarDB | MySQL | PostgreSQL | Oracle |
+|----------|---------------|-------------------|--------------|
+| TEXT | VARCHAR(64) | VARCHAR(10485760) | VARCHAR2(64) |
+| BLOB | VARBINARY(64) | | RAW(64) |
+
+The value range of `BIGINT` in ScalarDB is from -2^53 to 2^53, regardless of the underlying database.
+
+If this data-type mapping doesn't match your application, please alter the tables to change the data types after creating them by using this tool.
+
+## Use Schema Loader in your application
+
+You can check the version of Schema Loader from the [Maven Central Repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb-schema-loader). For example, in Gradle, you can add the following dependency to your `build.gradle` file, replacing `<VERSION>` with the version of Schema Loader that you want to use:
+
+```gradle
+dependencies {
+    implementation 'com.scalar-labs:scalardb-schema-loader:<VERSION>'
+}
+```
+
+### Create, repair, or delete tables
+
+You can create, delete, or repair tables that are defined in the schema by using Schema Loader. To do this, you can pass a ScalarDB properties file, schema, and additional options, if needed, as shown below:
+
+```java
+public class SchemaLoaderSample {
+  public static int main(String... args) throws SchemaLoaderException {
+    Path configFilePath = Paths.get("database.properties");
+    Path schemaFilePath = Paths.get("sample_schema.json");
+    boolean createCoordinatorTables = true; // whether to create the Coordinator tables or not
+    boolean deleteCoordinatorTables = true; // whether to delete the Coordinator tables or not
+    boolean repairCoordinatorTables = true; // whether to repair the Coordinator tables or not
+
+    Map<String, String> options = new HashMap<>();
+
+    options.put(
+        CassandraAdmin.REPLICATION_STRATEGY, ReplicationStrategy.SIMPLE_STRATEGY.toString());
+    options.put(CassandraAdmin.COMPACTION_STRATEGY, CompactionStrategy.LCS.toString());
+    options.put(CassandraAdmin.REPLICATION_FACTOR, "1");
+
+    options.put(DynamoAdmin.REQUEST_UNIT, "1");
+    options.put(DynamoAdmin.NO_SCALING, "true");
+    options.put(DynamoAdmin.NO_BACKUP, "true");
+
+    // Create tables
+    SchemaLoader.load(configFilePath, schemaFilePath, options, createCoordinatorTables);
+
+    // Delete tables
+    SchemaLoader.unload(configFilePath, schemaFilePath, deleteCoordinatorTables);
+
+    // Repair tables
+    SchemaLoader.repairTables(configFilePath, schemaFilePath, options, repairCoordinatorTables);
+
+    return 0;
+  }
+}
+```
+
+You can also create, delete, or repair a schema by passing a serialized-schema JSON string (the raw text of a schema file) as shown below:
+
+```java
+// Create tables.
+SchemaLoader.load(configFilePath, serializedSchemaJson, options, createCoordinatorTables);
+
+// Repair tables.
+SchemaLoader.repairTables(configFilePath, serializedSchemaJson, options, repairCoordinatorTables);
+
+// Delete tables.
+SchemaLoader.unload(configFilePath, serializedSchemaJson, deleteCoordinatorTables);
+```
+
+When configuring ScalarDB, you can use a `Properties` object as well, as shown below:
+
+```java
+// Create tables.
+SchemaLoader.load(properties, serializedSchemaJson, options, createCoordinatorTables);
+
+// Repair tables.
+SchemaLoader.repairTables(properties, serializedSchemaJson, options, repairCoordinatorTables);
+
+// Delete tables.
+SchemaLoader.unload(properties, serializedSchemaJson, deleteCoordinatorTables);
+```
diff --git a/docs/3.7/schema-loader.md b/docs/3.7/schema-loader.md
new file mode 100644
index 00000000..aa1b3238
--- /dev/null
+++ b/docs/3.7/schema-loader.md
@@ -0,0 +1,716 @@
+# ScalarDB Schema Loader
+
+ScalarDB has its own data model and schema that maps to the implementation-specific data model and schema. In addition, ScalarDB stores internal metadata, such as transaction IDs, record versions, and transaction statuses, to manage transaction logs and statuses when you use the Consensus Commit transaction manager.
+
+Since managing the schema mapping and metadata for transactions can be difficult, you can use ScalarDB Schema Loader, which is a tool to create schemas that doesn't require you to have in-depth knowledge about schema mapping or metadata.
+
+You have two options to specify general CLI options in Schema Loader:
+
+- Pass the ScalarDB properties file and database-specific or storage-specific options.
+- Pass database-specific or storage-specific options without the ScalarDB properties file. (Deprecated)
+
+{% capture notice--info %}
+**Note**
+
+This tool supports only basic options to create, delete, repair, or alter a table. If you want to use the advanced features of a database, you must alter your tables with a database-specific tool after creating the tables with this tool.
+{% endcapture %}
+
{{ notice--info | markdownify }}
+ +## Set up Schema Loader + +Select your preferred method to set up Schema Loader, and follow the instructions. + +
+
+ + +
+ +
+ +You can download the release versions of Schema Loader from the [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases) page. +
+
+ +You can pull the Docker image from the [Scalar container registry](https://github.com/orgs/scalar-labs/packages/container/package/scalardb-schema-loader) by running the following command, replacing the contents in the angle brackets as described: + +```console +$ docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader: +``` + +{% capture notice--info %} +**Note** + +You can specify the same command arguments even if you use the fat JAR or the container. In the [Available commands](#available-commands) section, the JAR is used, but you can run the commands by using the container in the same way by replacing `java -jar scalardb-schema-loader-.jar` with `docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader:`. +{% endcapture %} + +
{{ notice--info | markdownify }}
+
+
+ +## Run Schema Loader + +This section explains how to run Schema Loader. + +### Available commands + +Select how you would like to configure Schema Loader for your database. The preferred method is to use the properties file since other, database-specific methods are deprecated. + +The following commands are available when using the properties file: + +```console +Usage: java -jar scalardb-schema-loader-.jar [-D] [--coordinator] + [--no-backup] [--no-scaling] -c= + [--compaction-strategy=] [-f=] + [--replication-factor=] + [--replication-strategy=] [--ru=] +Create/Delete schemas in the storage defined in the config file + -A, --alter Alter tables : it will add new columns and create/delete + secondary index for existing tables. It compares the + provided table schema to the existing schema to decide + which columns need to be added and which indexes need + to be created or deleted + -c, --config= + Path to the config file of ScalarDB + --compaction-strategy= + The compaction strategy, must be LCS, STCS or TWCS + (supported in Cassandra) + --coordinator Create/delete/repair Coordinator tables + -D, --delete-all Delete tables + -f, --schema-file= + Path to the schema json file + --no-backup Disable continuous backup (supported in DynamoDB) + --no-scaling Disable auto-scaling (supported in DynamoDB, Cosmos DB) + --repair-all Repair tables : it repairs the table metadata of + existing tables. When using Cosmos DB, it + additionally repairs stored procedure attached + to each table + --replication-factor= + The replication factor (supported in Cassandra) + --replication-strategy= + The replication strategy, must be SimpleStrategy or + NetworkTopologyStrategy (supported in Cassandra) + --ru= Base resource unit (supported in DynamoDB, Cosmos DB) +``` + +For a sample properties file, see [`database.properties`](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties). + +{% capture notice--info %} +**Note** + +The following database-specific methods have been deprecated. Please use the [commands for configuring the properties file](#available-commands) instead. + +
+
+ + + + +
+ +
+ +```console +Usage: java -jar scalardb-schema-loader-.jar --cassandra [-D] + [-c=] -f= -h= + [-n=] [-p=] [-P=] + [-R=] [-u=] +Create/Delete Cassandra schemas + -A, --alter Alter tables : it will add new columns and create/delete + secondary index for existing tables. It compares the + provided table schema to the existing schema to decide + which columns need to be added and which indexes need + to be created or deleted + -c, --compaction-strategy= + Cassandra compaction strategy, must be LCS, STCS or TWCS + -D, --delete-all Delete tables + -f, --schema-file= + Path to the schema json file + -h, --host= Cassandra host IP + -n, --network-strategy= + Cassandra network strategy, must be SimpleStrategy or + NetworkTopologyStrategy + -p, --password= + Cassandra password + -P, --port= Cassandra Port + -R, --replication-factor= + Cassandra replication factor + --repair-all Repair tables : it repairs the table metadata of + existing tables + -u, --user= Cassandra user +``` +
+
+ +```console +Usage: java -jar scalardb-schema-loader-.jar --cosmos [-D] + [--no-scaling] -f= -h= -p= [-r=] +Create/Delete Cosmos DB schemas + -A, --alter Alter tables : it will add new columns and create/delete + secondary index for existing tables. It compares the + provided table schema to the existing schema to decide + which columns need to be added and which indexes need + to be created or deleted + -D, --delete-all Delete tables + -f, --schema-file= + Path to the schema json file + -h, --host= Cosmos DB account URI + --no-scaling Disable auto-scaling for Cosmos DB + -p, --password= Cosmos DB key + -r, --ru= Base resource unit + --repair-all Repair tables : it repairs the table metadata of + existing tables and repairs stored procedure + attached to each table +``` +
+
+ +```console +Usage: java -jar scalardb-schema-loader-.jar --dynamo [-D] + [--no-backup] [--no-scaling] [--endpoint-override=] + -f= -p= [-r=] --region= + -u= +Create/Delete DynamoDB schemas + -A, --alter Alter tables : it will add new columns and create/delete + secondary index for existing tables. It compares the + provided table schema to the existing schema to decide + which columns need to be added and which indexes need + to be created or deleted + -D, --delete-all Delete tables + --endpoint-override= + Endpoint with which the DynamoDB SDK should + communicate + -f, --schema-file= + Path to the schema json file + --no-backup Disable continuous backup for DynamoDB + --no-scaling Disable auto-scaling for DynamoDB + -p, --password= AWS access secret key + -r, --ru= Base resource unit + --region= AWS region + --repair-all Repair tables : it repairs the table metadata of + existing tables + -u, --user= AWS access key ID +``` +
+
+ +```console +Usage: java -jar scalardb-schema-loader-.jar --jdbc [-D] + -f= -j= -p= -u= +Create/Delete JDBC schemas + -A, --alter Alter tables : it will add new columns and create/delete + secondary index for existing tables. It compares the + provided table schema to the existing schema to decide + which columns need to be added and which indexes need + to be created or deleted + -D, --delete-all Delete tables + -f, --schema-file= + Path to the schema json file + -j, --jdbc-url= JDBC URL + -p, --password= + JDBC password + --repair-all Repair tables : it repairs the table metadata of + existing tables + -u, --user= JDBC user +``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
+ +### Create namespaces and tables + +To create namespaces and tables by using a properties file, run the following command, replacing the contents in the angle brackets as described: + +```console +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] +``` + +If `--coordinator` is specified, a [Coordinator table](api-guide.md#specify-operations-for-the-coordinator-table) will be created. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+
+```console
+$ java -jar scalardb-schema-loader-<VERSION>.jar --cassandra -h <CASSANDRA_IP> [-P <PORT>] [-u <USER>] [-p <PASSWORD>] -f <PATH_TO_SCHEMA_FILE> [-n <NETWORK_STRATEGY>] [-R <REPLICATION_FACTOR>]
+```
+
+- If `-P <PORT>` is not supplied, it defaults to `9042`.
+- If `-u <USER>` is not supplied, it defaults to `cassandra`.
+- If `-p <PASSWORD>` is not supplied, it defaults to `cassandra`.
+- `<NETWORK_STRATEGY>` should be `SimpleStrategy` or `NetworkTopologyStrategy`.
+
+
+```console
+$ java -jar scalardb-schema-loader-<VERSION>.jar --cosmos -h <COSMOS_DB_FOR_NOSQL_ACCOUNT_URI> -p <COSMOS_DB_FOR_NOSQL_KEY> -f <PATH_TO_SCHEMA_FILE> [-r BASE_RESOURCE_UNIT]
+```
+
+- For `<COSMOS_DB_FOR_NOSQL_KEY>`, you can use a primary key or a secondary key.
+- `-r BASE_RESOURCE_UNIT` is optional. You can specify the RU of each database. The maximum RU among the tables in the database will be set. If you don't specify the RU for tables, the database RU will be set by using this option. By default, the value is `400`.
+
+
+```console
+$ java -jar scalardb-schema-loader-<VERSION>.jar --dynamo -u <AWS_ACCESS_KEY_ID> -p <AWS_ACCESS_SECRET_KEY> --region <REGION> -f <PATH_TO_SCHEMA_FILE> [-r BASE_RESOURCE_UNIT]
+```
+
+- `<REGION>` should be a string that specifies an AWS region, like `ap-northeast-1`.
+- The `-r` option is almost the same as the Cosmos DB for NoSQL option. However, in this case, the unit means DynamoDB capacity units. The read and write capacity units are set to the same value.
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f +``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
+
+### Alter tables
+
+You can use a command to add new columns to and create or delete a secondary index for existing tables. This command compares the provided table schema to the existing schema to decide which columns need to be added and which indexes need to be created or deleted.
+
+To add new columns to and create or delete a secondary index for existing tables, run the following command, replacing the contents in the angle brackets as described:
+
+```console
+$ java -jar scalardb-schema-loader-<VERSION>.jar --config <PATH_TO_SCALARDB_PROPERTIES_FILE> -f <PATH_TO_SCHEMA_FILE> --alter
+```
+
+{% capture notice--info %}
+**Note**
+
+The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead.
+
+
+ + + + +
+ +
+ +```console +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f --alter +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f --alter +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f --alter +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f --alter +``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
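+
+For example, suppose you added a column and a secondary index to a table definition in `schema.json` (a placeholder name). With an illustrative version number, the following command applies only those differences to the existing tables:
+
+```console
+# Hypothetical file names and version; adjust them to your environment.
+$ java -jar scalardb-schema-loader-3.7.0.jar --config database.properties -f schema.json --alter
+```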
+ +### Delete tables + +You can delete tables by using the properties file. To delete tables, run the following command, replacing the contents in the angle brackets as described: + +```console +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] -D +``` + +If `--coordinator` is specified, the Coordinator table will be deleted as well. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ +```console +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f -D +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f -D +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f -D +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f -D +``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
+ +### Repair tables + +You can repair the table metadata of existing tables by using the properties file. To repair table metadata of existing tables, run the following command, replacing the contents in the angle brackets as described: + +```console +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] --repair-all +``` + +If `--coordinator` is specified, the Coordinator table will be repaired as well. In addition, if you're using Cosmos DB for NoSQL, running this command will also repair stored procedures attached to each table. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ +```console +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f --repair-all +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f --repair-all +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region [--no-backup] -f --repair-all +``` +
+
+ +```console +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f --repair-all +``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
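+
+For example, if the table metadata has been lost or corrupted, you could re-derive it from the same schema file that was used to create the tables, including the Coordinator table. The file names and version number are placeholders:
+
+```console
+# Hypothetical file names and version; adjust them to your environment.
+$ java -jar scalardb-schema-loader-3.7.0.jar --config database.properties -f schema.json --coordinator --repair-all
+```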
+ +### Sample schema file + +The following is a sample schema. For a sample schema file, see [`schema_sample.json`](https://github.com/scalar-labs/scalardb/blob/master/schema-loader/sample/schema_sample.json). + +```json +{ + "sample_db.sample_table": { + "transaction": false, + "partition-key": [ + "c1" + ], + "clustering-key": [ + "c4 ASC", + "c6 DESC" + ], + "columns": { + "c1": "INT", + "c2": "TEXT", + "c3": "BLOB", + "c4": "INT", + "c5": "BOOLEAN", + "c6": "INT" + }, + "secondary-index": [ + "c2", + "c4" + ] + }, + + "sample_db.sample_table1": { + "transaction": true, + "partition-key": [ + "c1" + ], + "clustering-key": [ + "c4" + ], + "columns": { + "c1": "INT", + "c2": "TEXT", + "c3": "INT", + "c4": "INT", + "c5": "BOOLEAN" + } + }, + + "sample_db.sample_table2": { + "transaction": false, + "partition-key": [ + "c1" + ], + "clustering-key": [ + "c4", + "c3" + ], + "columns": { + "c1": "INT", + "c2": "TEXT", + "c3": "INT", + "c4": "INT", + "c5": "BOOLEAN" + } + } +} +``` + +The schema has table definitions that include `columns`, `partition-key`, `clustering-key`, `secondary-index`, and `transaction` fields. + +- The `columns` field defines columns of the table and their data types. +- The `partition-key` field defines which columns the partition key is composed of. +- The `clustering-key` field defines which columns the clustering key is composed of. +- The `secondary-index` field defines which columns are indexed. +- The `transaction` field indicates whether the table is for transactions or not. + - If you set the `transaction` field to `true` or don't specify the `transaction` field, this tool creates a table with transaction metadata if needed. + - If you set the `transaction` field to `false`, this tool creates a table without any transaction metadata (that is, for a table with [Storage API](storage-abstraction.md)). + +You can also specify database or storage-specific options in the table definition as follows: + +```json +{ + "sample_db.sample_table3": { + "partition-key": [ + "c1" + ], + "columns": { + "c1": "INT", + "c2": "TEXT", + "c3": "BLOB" + }, + "compaction-strategy": "LCS", + "ru": 5000 + } +} +``` + +The database or storage-specific options you can specify are as follows: + +
+
+ + + + +
+ +
+
+The `compaction-strategy` option specifies the compaction strategy to use. This option should be `STCS` (SizeTieredCompactionStrategy), `LCS` (LeveledCompactionStrategy), or `TWCS` (TimeWindowCompactionStrategy).
+
+ +The `ru` option stands for Request Units. For details, see [RUs](#rus). +
+
+ +The `ru` option stands for Request Units. For details, see [RUs](#rus). +
+
+ +No options are available for JDBC databases. +
+
+
+## Scale for performance when using Cosmos DB for NoSQL or DynamoDB
+
+When using Cosmos DB for NoSQL or DynamoDB, you can scale by using Request Units (RUs) or auto-scaling.
+
+### RUs
+
+You can scale the throughput of Cosmos DB for NoSQL and DynamoDB by specifying the `--ru` option, which applies to all tables. Alternatively, you can set the `ru` parameter for each table in the schema file.
+
+If the `--ru` option is not set, the default values will be `400` for Cosmos DB for NoSQL and `10` for DynamoDB.
+
+{% capture notice--info %}
+**Note**
+
+- Schema Loader abstracts [Request Units](https://docs.microsoft.com/azure/cosmos-db/request-units) for Cosmos DB for NoSQL and [Capacity Units](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) for DynamoDB with `RU`. Therefore, be sure to set an appropriate value depending on the database implementation.
+- Be aware that Schema Loader sets the same value for both the read capacity unit and the write capacity unit in DynamoDB.
+{% endcapture %}
+
{{ notice--info | markdownify }}
+
+### Auto-scaling
+
+By default, Schema Loader enables auto-scaling of RUs for all tables: RUs scale between 10 percent and 100 percent of the specified RU value, depending on the workload. For example, if you specify `-r 10000`, the RUs of each table auto-scale between `1000` and `10000`.
+
+{% capture notice--info %}
+**Note**
+
+Auto-scaling for Cosmos DB for NoSQL is enabled only when this option is set to `4000` or more.
+{% endcapture %}
+
{{ notice--info | markdownify }}
+
+## Data-type mapping between ScalarDB and other databases
+
+The following table shows the supported data types in ScalarDB and their mapping to the data types of other databases.
+
+| ScalarDB | Cassandra | Cosmos DB for NoSQL | DynamoDB | MySQL | PostgreSQL | Oracle | SQL Server |
+|-----------|-----------|---------------------|----------|----------|------------------|----------------|-----------------|
+| BOOLEAN | boolean | boolean (JSON) | BOOL | boolean | boolean | number(1) | bit |
+| INT | int | number (JSON) | N | int | int | int | int |
+| BIGINT | bigint | number (JSON) | N | bigint | bigint | number(19) | bigint |
+| FLOAT | float | number (JSON) | N | double | float | binary_float | float(24) |
+| DOUBLE | double | number (JSON) | N | double | double precision | binary_double | float |
+| TEXT | text | string (JSON) | S | longtext | text | varchar2(4000) | varchar(8000) |
+| BLOB | blob | string (JSON) | B | longblob | bytea | RAW(2000) | varbinary(8000) |
+
+However, the following data types in JDBC databases are converted differently when they are used as a primary key or a secondary index key. This is due to the limitations of RDB data types.
+
+| ScalarDB | MySQL | PostgreSQL | Oracle |
+|----------|---------------|-------------------|--------------|
+| TEXT | VARCHAR(64) | VARCHAR(10485760) | VARCHAR2(64) |
+| BLOB | VARBINARY(64) | | RAW(64) |
+
+The value range of `BIGINT` in ScalarDB is from -2^53 to 2^53, regardless of the underlying database.
+
+If this data-type mapping doesn't match your application, please alter the tables to change the data types after creating them by using this tool.
+
+## Internal metadata for Consensus Commit
+
+The Consensus Commit transaction manager manages metadata (for example, transaction ID, record version, and transaction status) stored along with the actual records to handle transactions properly.
+
+Thus, along with any columns that the application requires, additional columns for the metadata need to be defined in the schema. Additionally, this tool creates a table with the metadata if you use the Consensus Commit transaction manager.
+
+## Use Schema Loader in your application
+
+You can check the version of Schema Loader from the [Maven Central Repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb-schema-loader). For example, in Gradle, you can add the following dependency to your `build.gradle` file, replacing `<VERSION>` with the version of Schema Loader that you want to use:
+
+```gradle
+dependencies {
+    implementation 'com.scalar-labs:scalardb-schema-loader:<VERSION>'
+}
+```
+
+### Create, alter, repair, or delete tables
+
+You can create, alter, delete, or repair tables that are defined in the schema by using Schema Loader. To do this, you can pass a ScalarDB properties file, schema, and additional options, if needed, as shown below:
+
+```java
+public class SchemaLoaderSample {
+  public static int main(String... args) throws SchemaLoaderException {
+    Path configFilePath = Paths.get("database.properties");
+    // "sample_schema.json" and "altered_sample_schema.json" can be found in the "/sample" directory.
+    Path schemaFilePath = Paths.get("sample_schema.json");
+    Path alteredSchemaFilePath = Paths.get("altered_sample_schema.json");
+    boolean createCoordinatorTables = true; // whether to create the Coordinator table or not
+    boolean deleteCoordinatorTables = true; // whether to delete the Coordinator table or not
+    boolean repairCoordinatorTables = true; // whether to repair the Coordinator table or not
+
+    Map<String, String> tableCreationOptions = new HashMap<>();
+
+    tableCreationOptions.put(
+        CassandraAdmin.REPLICATION_STRATEGY, ReplicationStrategy.SIMPLE_STRATEGY.toString());
+    tableCreationOptions.put(CassandraAdmin.COMPACTION_STRATEGY, CompactionStrategy.LCS.toString());
+    tableCreationOptions.put(CassandraAdmin.REPLICATION_FACTOR, "1");
+
+    tableCreationOptions.put(DynamoAdmin.REQUEST_UNIT, "1");
+    tableCreationOptions.put(DynamoAdmin.NO_SCALING, "true");
+    tableCreationOptions.put(DynamoAdmin.NO_BACKUP, "true");
+
+    Map<String, String> indexCreationOptions = new HashMap<>();
+    indexCreationOptions.put(DynamoAdmin.NO_SCALING, "true");
+
+    Map<String, String> tableReparationOptions = new HashMap<>();
+    tableReparationOptions.put(DynamoAdmin.NO_BACKUP, "true");
+
+    // Create tables.
+    SchemaLoader.load(configFilePath, schemaFilePath, tableCreationOptions, createCoordinatorTables);
+
+    // Alter tables.
+    SchemaLoader.alterTables(configFilePath, alteredSchemaFilePath, indexCreationOptions);
+
+    // Repair tables.
+    SchemaLoader.repairTables(configFilePath, schemaFilePath, tableReparationOptions, repairCoordinatorTables);
+
+    // Delete tables.
+    SchemaLoader.unload(configFilePath, schemaFilePath, deleteCoordinatorTables);
+
+    return 0;
+  }
+}
+```
+
+You can also create, delete, or repair a schema by passing a serialized-schema JSON string (the raw text of a schema file) as shown below:
+
+```java
+// Create tables.
+SchemaLoader.load(configFilePath, serializedSchemaJson, tableCreationOptions, createCoordinatorTables);
+
+// Alter tables.
+SchemaLoader.alterTables(configFilePath, serializedAlteredSchemaJson, indexCreationOptions);
+
+// Repair tables.
+SchemaLoader.repairTables(configFilePath, serializedSchemaJson, tableReparationOptions, repairCoordinatorTables);
+
+// Delete tables.
+SchemaLoader.unload(configFilePath, serializedSchemaJson, deleteCoordinatorTables);
+```
+
+When configuring ScalarDB, you can use a `Properties` object as well, as shown below:
+
+```java
+// Create tables.
+SchemaLoader.load(properties, serializedSchemaJson, tableCreationOptions, createCoordinatorTables);
+
+// Alter tables.
+SchemaLoader.alterTables(properties, serializedAlteredSchemaJson, indexCreationOptions);
+
+// Repair tables.
+SchemaLoader.repairTables(properties, serializedSchemaJson, tableReparationOptions, repairCoordinatorTables);
+
+// Delete tables.
+SchemaLoader.unload(properties, serializedSchemaJson, deleteCoordinatorTables);
+```
diff --git a/docs/3.8/schema-loader.md b/docs/3.8/schema-loader.md
index d898a18e..aa1b3238 100644
--- a/docs/3.8/schema-loader.md
+++ b/docs/3.8/schema-loader.md
@@ -1,50 +1,66 @@
 # ScalarDB Schema Loader
 
-ScalarDB has its own data model and schema, that maps to the implementation specific data model and schema.
-Also, it stores internal metadata (e.g., transaction ID, record version, transaction status) for managing transaction logs and statuses when you use the Consensus Commit transaction manager.
-It is a little hard for application developers to manage the schema mapping and metadata for transactions, so we offer a tool called ScalarDB Schema Loader for creating schema without requiring much knowledge about those.
+ScalarDB has its own data model and schema that maps to the implementation-specific data model and schema. In addition, ScalarDB stores internal metadata, such as transaction IDs, record versions, and transaction statuses, to manage transaction logs and statuses when you use the Consensus Commit transaction manager.
 
-There are two ways to specify general CLI options in Schema Loader:
-  - Pass a ScalarDB configuration file and database/storage-specific options additionally.
-  - Pass the options without a ScalarDB configuration (Deprecated).
+Since managing the schema mapping and metadata for transactions can be difficult, you can use ScalarDB Schema Loader, which is a tool to create schemas that doesn't require you to have in-depth knowledge about schema mapping or metadata.
 
-Note that this tool supports only basic options to create/delete/repair/alter a table. If you want
-to use the advanced features of a database, please alter your tables with a database specific tool after creating them with this tool.
+You have two options to specify general CLI options in Schema Loader:
 
-# Usage
+- Pass the ScalarDB properties file and database-specific or storage-specific options.
+- Pass database-specific or storage-specific options without the ScalarDB properties file. (Deprecated)
 
-## Install
+{% capture notice--info %}
+**Note**
 
-The release versions of `schema-loader` can be downloaded from [releases](https://github.com/scalar-labs/scalardb/releases) page of ScalarDB.
+This tool supports only basic options to create, delete, repair, or alter a table. If you want to use the advanced features of a database, you must alter your tables with a database-specific tool after creating the tables with this tool.
+{% endcapture %}
 
-## Build
+
{{ notice--info | markdownify }}
-In case you want to build `schema-loader` from the source: -```console -$ ./gradlew schema-loader:shadowJar -``` -- The built fat jar file is `schema-loader/build/libs/scalardb-schema-loader-.jar` +## Set up Schema Loader -## Docker +Select your preferred method to set up Schema Loader, and follow the instructions. -You can pull the docker image from [Scalar's container registry](https://github.com/orgs/scalar-labs/packages/container/package/scalardb-schema-loader). -```console -docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader: -``` -- Note that you can specify the same command arguments even if you use the fat jar or the container. The example commands in the next section are shown with a jar, but you can run the commands with the container in the same way by replacing `java -jar scalardb-schema-loader-.jar` with `docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader:`. +
+
+ + +
+ +
+ +You can download the release versions of Schema Loader from the [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases) page. +
+
+ +You can pull the Docker image from the [Scalar container registry](https://github.com/orgs/scalar-labs/packages/container/package/scalardb-schema-loader) by running the following command, replacing the contents in the angle brackets as described: -You can also build the docker image as follows. ```console -$ ./gradlew schema-loader:docker +$ docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader: ``` -## Run +{% capture notice--info %} +**Note** + +You can specify the same command arguments even if you use the fat JAR or the container. In the [Available commands](#available-commands) section, the JAR is used, but you can run the commands by using the container in the same way by replacing `java -jar scalardb-schema-loader-.jar` with `docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader:`. +{% endcapture %} + +
{{ notice--info | markdownify }}
+
+
+ +## Run Schema Loader + +This section explains how to run Schema Loader. ### Available commands -For using a config file: +Select how you would like to configure Schema Loader for your database. The preferred method is to use the properties file since other, database-specific methods are deprecated. + +The following commands are available when using the properties file: + ```console -Usage: java -jar scalardb-schema-loader-.jar [-D] [--coordinator] +Usage: java -jar scalardb-schema-loader-.jar [-D] [--coordinator] [--no-backup] [--no-scaling] -c= [--compaction-strategy=] [-f=] [--replication-factor=] @@ -60,7 +76,7 @@ Create/Delete schemas in the storage defined in the config file --compaction-strategy= The compaction strategy, must be LCS, STCS or TWCS (supported in Cassandra) - --coordinator Create/delete/repair coordinator tables + --coordinator Create/delete/repair Coordinator tables -D, --delete-all Delete tables -f, --schema-file= Path to the schema json file @@ -78,9 +94,57 @@ Create/Delete schemas in the storage defined in the config file --ru= Base resource unit (supported in DynamoDB, Cosmos DB) ``` -For Cosmos DB (Deprecated. Please use the command using a config file instead): +For a sample properties file, see [`database.properties`](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties). + +{% capture notice--info %} +**Note** + +The following database-specific methods have been deprecated. Please use the [commands for configuring the properties file](#available-commands) instead. + +
+
+ + + + +
+ +
+ +```console +Usage: java -jar scalardb-schema-loader-.jar --cassandra [-D] + [-c=] -f= -h= + [-n=] [-p=] [-P=] + [-R=] [-u=] +Create/Delete Cassandra schemas + -A, --alter Alter tables : it will add new columns and create/delete + secondary index for existing tables. It compares the + provided table schema to the existing schema to decide + which columns need to be added and which indexes need + to be created or deleted + -c, --compaction-strategy= + Cassandra compaction strategy, must be LCS, STCS or TWCS + -D, --delete-all Delete tables + -f, --schema-file= + Path to the schema json file + -h, --host= Cassandra host IP + -n, --network-strategy= + Cassandra network strategy, must be SimpleStrategy or + NetworkTopologyStrategy + -p, --password= + Cassandra password + -P, --port= Cassandra Port + -R, --replication-factor= + Cassandra replication factor + --repair-all Repair tables : it repairs the table metadata of + existing tables + -u, --user= Cassandra user +``` +
+
+ ```console -Usage: java -jar scalardb-schema-loader-.jar --cosmos [-D] +Usage: java -jar scalardb-schema-loader-.jar --cosmos [-D] [--no-scaling] -f= -h= -p= [-r=] Create/Delete Cosmos DB schemas -A, --alter Alter tables : it will add new columns and create/delete @@ -99,10 +163,11 @@ Create/Delete Cosmos DB schemas existing tables and repairs stored procedure attached to each table ``` +
+
-For DynamoDB (Deprecated. Please use the command using a config file instead): ```console -Usage: java -jar scalardb-schema-loader-.jar --dynamo [-D] +Usage: java -jar scalardb-schema-loader-.jar --dynamo [-D] [--no-backup] [--no-scaling] [--endpoint-override=] -f= -p= [-r=] --region= -u= @@ -127,41 +192,11 @@ Create/Delete DynamoDB schemas existing tables -u, --user= AWS access key ID ``` +
+
-For Cassandra (Deprecated. Please use the command using a config file instead): ```console -Usage: java -jar scalardb-schema-loader-.jar --cassandra [-D] - [-c=] -f= -h= - [-n=] [-p=] [-P=] - [-R=] [-u=] -Create/Delete Cassandra schemas - -A, --alter Alter tables : it will add new columns and create/delete - secondary index for existing tables. It compares the - provided table schema to the existing schema to decide - which columns need to be added and which indexes need - to be created or deleted - -c, --compaction-strategy= - Cassandra compaction strategy, must be LCS, STCS or TWCS - -D, --delete-all Delete tables - -f, --schema-file= - Path to the schema json file - -h, --host= Cassandra host IP - -n, --network-strategy= - Cassandra network strategy, must be SimpleStrategy or - NetworkTopologyStrategy - -p, --password= - Cassandra password - -P, --port= Cassandra Port - -R, --replication-factor= - Cassandra replication factor - --repair-all Repair tables : it repairs the table metadata of - existing tables - -u, --user= Cassandra user -``` - -For a JDBC database (Deprecated. Please use the command using a config file instead): -```console -Usage: java -jar scalardb-schema-loader-.jar --jdbc [-D] +Usage: java -jar scalardb-schema-loader-.jar --jdbc [-D] -f= -j= -p= -u= Create/Delete JDBC schemas -A, --alter Alter tables : it will add new columns and create/delete @@ -179,142 +214,234 @@ Create/Delete JDBC schemas existing tables -u, --user= JDBC user ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
### Create namespaces and tables -For using a config file (Sample config file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties)): +To create namespaces and tables by using a properties file, run the following command, replacing the contents in the angle brackets as described: + ```console -$ java -jar scalardb-schema-loader-.jar --config -f schema.json [--coordinator] +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] ``` - - if `--coordinator` is specified, the coordinator tables will be created. -For using CLI arguments fully for configuration (Deprecated. Please use the command using a config file instead): +If `--coordinator` is specified, a [Coordinator table](api-guide.md#specify-operations-for-the-coordinator-table) will be created. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ ```console -# For Cosmos DB -$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f schema.json [-r BASE_RESOURCE_UNIT] +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f [-n ] [-R ] ``` - - `` you can use a primary key or a secondary key. - - `-r BASE_RESOURCE_UNIT` is an option. You can specify the RU of each database. The maximum RU in tables in the database will be set. If you don't specify RU of tables, the database RU will be set with this option. By default, it's 400. + +- If `-P ` is not supplied, it defaults to `9042`. +- If `-u ` is not supplied, it defaults to `cassandra`. +- If `-p ` is not supplied, it defaults to `cassandra`. +- `` should be `SimpleStrategy` or `NetworkTopologyStrategy` +
+
```console -# For DynamoDB -$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f schema.json [-r BASE_RESOURCE_UNIT] +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f [-r BASE_RESOURCE_UNIT] ``` - - `` should be a string to specify an AWS region like `ap-northeast-1`. - - `-r` option is almost the same as Cosmos DB option. However, the unit means DynamoDB capacity unit. The read and write capacity units are set the same value. + +- `` you can use a primary key or a secondary key. +- `-r BASE_RESOURCE_UNIT` is an option. You can specify the RU of each database. The maximum RU in tables in the database will be set. If you don't specify RU of tables, the database RU will be set with this option. By default, it's 400. +
+
```console -# For Cassandra -$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f schema.json [-n ] [-R ] +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f [-r BASE_RESOURCE_UNIT] ``` - - If `-P ` is not supplied, it defaults to `9042`. - - If `-u ` is not supplied, it defaults to `cassandra`. - - If `-p ` is not supplied, it defaults to `cassandra`. - - `` should be `SimpleStrategy` or `NetworkTopologyStrategy` + +- `` should be a string to specify an AWS region like `ap-northeast-1`. +- `-r` option is almost the same as Cosmos DB for NoSQL option. However, the unit means DynamoDB capacity unit. The read and write capacity units are set the same value. +
+
```console -# For a JDBC database -$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f schema.json +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
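+
+As a concrete example of the properties-file approach above, the following command uses hypothetical file names (`database.properties` and `schema.json` in the current directory) and a stand-in version number (`3.8.0`); replace them with your own values:
+
+```console
+$ java -jar scalardb-schema-loader-3.8.0.jar --config database.properties -f schema.json --coordinator
+```
+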
### Alter tables
 
-This command will add new columns and create/delete secondary index for existing tables. It compares
-the provided table schema to the existing schema to decide which columns need to be added and which
-indexes need to be created or deleted.
+You can use a command to add new columns to and create or delete a secondary index for existing tables. This command compares the provided table schema to the existing schema to decide which columns need to be added and which indexes need to be created or deleted.
 
-For using config file (Sample config file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties)):
+To add new columns to and create or delete a secondary index for existing tables, run the following command, replacing the contents in the angle brackets as described:
 
```console
-$ java -jar scalardb-schema-loader-.jar --config  -f schema.json --alter
+$ java -jar scalardb-schema-loader-.jar --config  -f  --alter
```
 
-For using CLI arguments fully for configuration (Deprecated. Please use the command using a config
-file instead):
+{% capture notice--info %}
+**Note**
+
+The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead.
+
+
+ + + + +
+ +
```console -# For Cosmos DB -$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f schema.json --alter +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f --alter ``` +
+
```console -# For DynamoDB -$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f schema.json --alter +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f --alter ``` +
+
```console -# For Cassandra -$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f schema.json --alter +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f --alter ``` +
+
```console -# For a JDBC database -$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f schema.json --alter +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f --alter ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
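+
+As an illustration of what an altered schema might look like, the following hypothetical schema file adds a `c5` column and a secondary index on it to an existing table (all table and column names here are examples only). When you run the `--alter` command above with a file like this, Schema Loader compares it to the existing schema, then adds the new column and creates the new index:
+
+```json
+{
+  "sample_db.sample_table": {
+    "transaction": true,
+    "partition-key": ["c1"],
+    "clustering-key": ["c4"],
+    "columns": {
+      "c1": "INT",
+      "c2": "TEXT",
+      "c3": "BLOB",
+      "c4": "INT",
+      "c5": "BOOLEAN"
+    },
+    "secondary-index": ["c5"]
+  }
+}
+```
+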
### Delete tables -For using config file (Sample config file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties)): +You can delete tables by using the properties file. To delete tables, run the following command, replacing the contents in the angle brackets as described: + ```console -$ java -jar scalardb-schema-loader-.jar --config -f schema.json [--coordinator] -D +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] -D ``` - - if `--coordinator` is specified, the coordinator tables will be deleted. - -For using CLI arguments fully for configuration (Deprecated. Please use the command using a config file instead): + +If `--coordinator` is specified, the Coordinator table will be deleted as well. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ ```console -# For Cosmos DB -$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f schema.json -D +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f -D ``` +
+
```console -# For DynamoDB -$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f schema.json -D +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f -D ``` +
+
```console -# For Cassandra -$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f schema.json -D +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f -D ``` +
+
```console -# For a JDBC database -$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f schema.json -D +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f -D ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
### Repair tables -This command will repair the table metadata of existing tables. When using Cosmos DB, it additionally repairs stored procedure attached to each table. +You can repair the table metadata of existing tables by using the properties file. To repair table metadata of existing tables, run the following command, replacing the contents in the angle brackets as described: -For using config file (Sample config file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties)): ```console -$ java -jar scalardb-schema-loader-.jar --config -f schema.json [--coordinator] --repair-all +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] --repair-all ``` -- if `--coordinator` is specified, the coordinator tables will be repaired as well. -For using CLI arguments fully for configuration (Deprecated. Please use the command using a config file instead): +If `--coordinator` is specified, the Coordinator table will be repaired as well. In addition, if you're using Cosmos DB for NoSQL, running this command will also repair stored procedures attached to each table. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ ```console -# For Cosmos DB -$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f schema.json --repair-all +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f --repair-all ``` +
+
```console -# For DynamoDB -$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region [--no-backup] -f schema.json --repair-all +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f --repair-all ``` +
+
```console -# For Cassandra -$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f schema.json --repair-all +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region [--no-backup] -f --repair-all ``` +
+
```console -# For a JDBC database -$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f schema.json --repair-all +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f --repair-all ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
### Sample schema file -The sample schema is as follows (Sample schema file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/schema-loader/sample/schema_sample.json)): +The following is a sample schema. For a sample schema file, see [`schema_sample.json`](https://github.com/scalar-labs/scalardb/blob/master/schema-loader/sample/schema_sample.json). ```json { @@ -379,14 +506,17 @@ The sample schema is as follows (Sample schema file can be found [here](https:// ``` The schema has table definitions that include `columns`, `partition-key`, `clustering-key`, `secondary-index`, and `transaction` fields. -The `columns` field defines columns of the table and their data types. -The `partition-key` field defines which columns the partition key is composed of, and `clustering-key` defines which columns the clustering key is composed of. -The `secondary-index` field defines which columns are indexed. -The `transaction` field indicates whether the table is for transactions or not. -If you set the `transaction` field to `true` or don't specify the `transaction` field, this tool creates a table with transaction metadata if needed. -If not, it creates a table without any transaction metadata (that is, for a table with [Storage API](storage-abstraction.md)). - -You can also specify database/storage-specific options in the table definition as follows: + +- The `columns` field defines columns of the table and their data types. +- The `partition-key` field defines which columns the partition key is composed of. +- The `clustering-key` field defines which columns the clustering key is composed of. +- The `secondary-index` field defines which columns are indexed. +- The `transaction` field indicates whether the table is for transactions or not. + - If you set the `transaction` field to `true` or don't specify the `transaction` field, this tool creates a table with transaction metadata if needed. + - If you set the `transaction` field to `false`, this tool creates a table without any transaction metadata (that is, for a table with [Storage API](storage-abstraction.md)). + +You can also specify database or storage-specific options in the table definition as follows: + ```json { "sample_db.sample_table3": { @@ -404,83 +534,120 @@ You can also specify database/storage-specific options in the table definition a } ``` -The database/storage-specific options you can specify are as follows: +The database or storage-specific options you can specify are as follows: -For Cassandra: -- `compaction-strategy`, a compaction strategy. It should be `STCS` (SizeTieredCompaction), `LCS` (LeveledCompactionStrategy) or `TWCS` (TimeWindowCompactionStrategy). +
+
+ + + + +
-For DynamoDB and Cosmos DB: -- `ru`, a request unit. Please see [RU](#ru) for the details. +
-## Scaling Performance +The `compaction-strategy` option is the compaction strategy used. This option should be `STCS` (SizeTieredCompaction), `LCS` (LeveledCompactionStrategy), or `TWCS` (TimeWindowCompactionStrategy). +
+
-### RU +The `ru` option stands for Request Units. For details, see [RUs](#rus). +
+
-You can scale the throughput of Cosmos DB and DynamoDB by specifying `--ru` option (which applies to all the tables) or `ru` parameter for each table. The default values are `400` for Cosmos DB and `10` for DynamoDB respectively, which are set without `--ru` option. +The `ru` option stands for Request Units. For details, see [RUs](#rus). +
+
-Note that the schema loader abstracts [Request Unit](https://docs.microsoft.com/azure/cosmos-db/request-units) of Cosmos DB and [Capacity Unit](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) of DynamoDB with `RU`. -So, please set an appropriate value depending on the database implementations. Please also note that the schema loader sets the same value to both Read Capacity Unit and Write Capacity Unit for DynamoDB. +No options are available for JDBC databases. +
+
+
+## Scale for performance when using Cosmos DB for NoSQL or DynamoDB
+
+When using Cosmos DB for NoSQL or DynamoDB, you can scale by using Request Units (RUs) or auto-scaling.
+
+### RUs
+
+You can scale the throughput of Cosmos DB for NoSQL and DynamoDB by specifying the `--ru` option, which applies to all tables, or by specifying the `ru` parameter for each table.
+
+If the `--ru` option is not set, the default values will be `400` for Cosmos DB for NoSQL and `10` for DynamoDB.
+
+{% capture notice--info %}
+**Note**
+
+- Schema Loader abstracts [Request Units](https://docs.microsoft.com/azure/cosmos-db/request-units) for Cosmos DB for NoSQL and [Capacity Units](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) for DynamoDB with `RU`. Therefore, be sure to set an appropriate value depending on the database implementation.
+- Be aware that Schema Loader sets the same value to both read capacity unit and write capacity unit for DynamoDB.
+{% endcapture %}
+
{{ notice--info | markdownify }}
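+
+For example, to apply a base of `10000` RUs to all tables when creating them, the command might look like the following (the version number and file names are placeholders):
+
+```console
+$ java -jar scalardb-schema-loader-3.8.0.jar --config database.properties -f schema.json --ru 10000
+```
+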
### Auto-scaling
 
-By default, the schema loader enables auto-scaling of RU for all tables: RU is scaled in or out between 10% and 100% of a specified RU depending on a workload. For example, if you specify `-r 10000`, RU of each table is scaled in or out between 1000 and 10000. Note that auto-scaling of Cosmos DB is enabled only when you set more than or equal to 4000 RU.
+By default, Schema Loader enables auto-scaling of RUs for all tables: RUs scale between 10 percent and 100 percent of a specified RU depending on the workload. For example, if you specify `-r 10000`, the RUs of each table auto-scale between `1000` and `10000`.
 
-## Data type mapping between ScalarDB and the other databases
+{% capture notice--info %}
+**Note**
 
-Here are the supported data types in ScalarDB and their mapping to the data types of other databases.
+Auto-scaling for Cosmos DB for NoSQL is enabled only when this option is set to `4000` or more.
+{% endcapture %}
 
-| ScalarDB  | Cassandra | Cosmos DB      | DynamoDB | MySQL    | PostgreSQL       | Oracle         | SQL Server      |
-|-----------|-----------|----------------|----------|----------|------------------|----------------|-----------------|
-| BOOLEAN   | boolean   | boolean (JSON) | BOOL     | boolean  | boolean          | number(1)      | bit             |
-| INT       | int       | number (JSON)  | N        | int      | int              | int            | int             |
-| BIGINT    | bigint    | number (JSON)  | N        | bigint   | bigint           | number(19)     | bigint          |
-| FLOAT     | float     | number (JSON)  | N        | double   | float            | binary_float   | float(24)       |
-| DOUBLE    | double    | number (JSON)  | N        | double   | double precision | binary_double  | float           |
-| TEXT      | text      | string (JSON)  | S        | longtext | text             | varchar2(4000) | varchar(8000)   |
-| BLOB      | blob      | string (JSON)  | B        | longblob | bytea            | RAW(2000)      | varbinary(8000) |
+
{{ notice--info | markdownify }}
-However, the following types in JDBC databases are converted differently when they are used as a primary key or a secondary index key due to the limitations of RDB data types. +## Data-type mapping between ScalarDB and other databases + +The following table shows the supported data types in ScalarDB and their mapping to the data types of other databases. + +| ScalarDB | Cassandra | Cosmos DB for NoSQL | DynamoDB | MySQL | PostgreSQL | Oracle | SQL Server | +|-----------|-----------|---------------------|----------|----------|------------------|----------------|-----------------| +| BOOLEAN | boolean | boolean (JSON) | BOOL | boolean | boolean | number(1) | bit | +| INT | int | number (JSON) | N | int | int | int | int | +| BIGINT | bigint | number (JSON) | N | bigint | bigint | number(19) | bigint | +| FLOAT | float | number (JSON) | N | double | float | binary_float | float(24) | +| DOUBLE | double | number (JSON) | N | double | double precision | binary_double | float | +| TEXT | text | string (JSON) | S | longtext | text | varchar2(4000) | varchar(8000) | +| BLOB | blob | string (JSON) | B | longblob | bytea | RAW(2000) | varbinary(8000) | + +However, the following data types in JDBC databases are converted differently when they are used as a primary key or a secondary index key. This is due to the limitations of RDB data types. | ScalarDB | MySQL | PostgreSQL | Oracle | |----------|---------------|-------------------|--------------| | TEXT | VARCHAR(64) | VARCHAR(10485760) | VARCHAR2(64) | | BLOB | VARBINARY(64) | | RAW(64) | -The value range of `BIGINT` in ScalarDB is from -2^53 to 2^53 regardless of the underlying database. +The value range of `BIGINT` in ScalarDB is from -2^53 to 2^53, regardless of the underlying database. -If this data type mapping doesn't match your application, please alter the tables to change the data types after creating them with this tool. +If this data-type mapping doesn't match your application, please alter the tables to change the data types after creating them by using this tool. ## Internal metadata for Consensus Commit -The Consensus Commit transaction manager manages metadata (e.g., transaction ID, record version, transaction status) stored along with the actual records to handle transactions properly. -Thus, along with any required columns by the application, additional columns for the metadata need to be defined in the schema. -Additionaly, this tool creates a table with the metadata when you use the Consensus Commit transaction manager. +The Consensus Commit transaction manager manages metadata (for example, transaction ID, record version, and transaction status) stored along with the actual records to handle transactions properly. + +Thus, along with any columns that the application requires, additional columns for the metadata need to be defined in the schema. Additionally, this tool creates a table with the metadata if you use the Consensus Commit transaction manager. + +## Use Schema Loader in your application + +You can check the version of Schema Loader from the [Maven Central Repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb-schema-loader). For example in Gradle, you can add the following dependency to your `build.gradle` file, replacing `` with the version of Schema Loader that you want to use: -## Using Schema Loader in your program -You can check the version of `schema-loader` from [maven central repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb-schema-loader). 
-For example in Gradle, you can add the following dependency to your build.gradle. Please replace the `` with the version you want to use. ```gradle dependencies { - implementation 'com.scalar-labs:scalardb-schema-loader:' + implementation 'com.scalar-labs:scalardb-schema-loader:' } ``` -### Create, alter, repair and delete +### Create, alter, repair, or delete tables -You can create, alter, delete and repair tables that are defined in the schema using SchemaLoader by -simply passing ScalarDB configuration file, schema, and additional options if needed as shown -below. +You can create, alter, delete, or repair tables that are defined in the schema by using Schema Loader. To do this, you can pass a ScalarDB properties file, schema, and additional options, if needed, as shown below: ```java public class SchemaLoaderSample { public static int main(String... args) throws SchemaLoaderException { Path configFilePath = Paths.get("database.properties"); - // "sample_schema.json" and "altered_sample_schema.json" can be found in the "/sample" directory + // "sample_schema.json" and "altered_sample_schema.json" can be found in the "/sample" directory. Path schemaFilePath = Paths.get("sample_schema.json"); Path alteredSchemaFilePath = Paths.get("altered_sample_schema.json"); - boolean createCoordinatorTables = true; // whether to create the coordinator tables or not - boolean deleteCoordinatorTables = true; // whether to delete the coordinator tables or not - boolean repairCoordinatorTables = true; // whether to repair the coordinator tables or not + boolean createCoordinatorTables = true; // whether to create the Coordinator table or not + boolean deleteCoordinatorTables = true; // whether to delete the Coordinator table or not + boolean repairCoordinatorTables = true; // whether to repair the Coordinator table or not Map tableCreationOptions = new HashMap<>(); @@ -499,16 +666,16 @@ public class SchemaLoaderSample { Map tableReparationOptions = new HashMap<>(); indexCreationOptions.put(DynamoAdmin.NO_BACKUP, "true"); - // Create tables + // Create tables. SchemaLoader.load(configFilePath, schemaFilePath, tableCreationOptions, createCoordinatorTables); - // Alter tables + // Alter tables. SchemaLoader.alterTables(configFilePath, alteredSchemaFilePath, indexCreationOptions); - // Repair tables + // Repair tables. SchemaLoader.repairTables(configFilePath, schemaFilePath, tableReparationOptions, repairCoordinatorTables); - // Delete tables + // Delete tables. SchemaLoader.unload(configFilePath, schemaFilePath, deleteCoordinatorTables); return 0; @@ -516,33 +683,34 @@ public class SchemaLoaderSample { } ``` -You can also create, delete or repair a schema by passing a serialized schema JSON string (the raw text of a schema file). +You can also create, delete, or repair a schema by passing a serialized-schema JSON string (the raw text of a schema file) as shown below: + ```java -// Create tables +// Create tables. SchemaLoader.load(configFilePath, serializedSchemaJson, tableCreationOptions, createCoordinatorTables); -// Alter tables +// Alter tables. SchemaLoader.alterTables(configFilePath, serializedAlteredSchemaFilePath, indexCreationOptions); -// Repair tables +// Repair tables. SchemaLoader.repairTables(configFilePath, serializedSchemaJson, tableReparationOptions, repairCoordinatorTables); -// Delete tables +// Delete tables. SchemaLoader.unload(configFilePath, serializedSchemaJson, deleteCoordinatorTables); ``` -For ScalarDB configuration, a `Properties` object can be used as well. 
+When configuring ScalarDB, you can use a `Properties` object as well, as shown below: ```java -// Create tables +// Create tables. SchemaLoader.load(properties, serializedSchemaJson, tableCreationOptions, createCoordinatorTables); -// Alter tables +// Alter tables. SchemaLoader.alterTables(properties, serializedAlteredSchemaFilePath, indexCreationOptions); -// Repair tables +// Repair tables. SchemaLoader.repairTables(properties, serializedSchemaJson, tableReparationOptions, repairCoordinatorTables); -// Delete tables +// Delete tables. SchemaLoader.unload(properties, serializedSchemaJson, deleteCoordinatorTables); ``` diff --git a/docs/3.9/scalardb-analytics-postgresql/scalardb-fdw.md b/docs/3.9/scalardb-analytics-postgresql/scalardb-fdw.md new file mode 100644 index 00000000..78fe9787 --- /dev/null +++ b/docs/3.9/scalardb-analytics-postgresql/scalardb-fdw.md @@ -0,0 +1,174 @@ +# ScalarDB FDW + +ScalarDB FDW is a PostgreSQL extension that implements a foreign data wrapper (FDW) for [ScalarDB](https://www.scalar-labs.com/scalardb/). + +ScalarDB FDW uses the Java Native Interface to directly utilize ScalarDB as a library inside the FDW and read data from external databases via scan operations for ScalarDB. + +## Prerequisites + +You must have the following prerequisites set up in your environment. + +### JDK + +You must install a version of the Java Development Kit (JDK) that is compatible with ScalarDB. In addition, you must set the `JAVA_HOME` environment variable, which points to your JDK installation directory. + +Note that since these extensions use the Java Native Interface (JNI) internally, you must include the dynamic library of the Java virtual machine (JVM), such as `libjvm.so`, in the library search path. + +### PostgreSQL + +This extension supports PostgreSQL 13 or later. For details on how to install PostgreSQL, see the official documentation at [Server Administration](https://www.postgresql.org/docs/current/admin.html). + +## Build and installation + +You can build and install this extension by running the following command. + +```console +make install +``` + +### Common build errors + +This section describes some common build errors that you might encounter. + +#### ld: library not found for -ljvm + +Normally, the build script finds the path for `libjvm.so` and properly sets it as a library search path. However, if you encounter the error `ld: library not found for -ljvm`, please copy the `libjvm.so` file to the default library search path. For example: + +```console +ln -s //libjvm.so /usr/lib64/libjvm.so +``` + +## Usage + +This section provides a usage example and available options for FDW for ScalarDB. + +### Example + +The following example shows you how to install and create the necessary components, and then run a query by using the FDW extension. + +#### 1. Install the extension + +For details on how to install the extension, see the [Build and installation](#build-and-installation) section. + +#### 2. Create an extension + +To create an extension, run the following command: + +```sql +CREATE EXTENSION scalardb_fdw; +``` + +#### 3. Create a foreign server + +To create a foreign server, run the following command: + +```sql +CREATE SERVER scalardb FOREIGN DATA WRAPPER scalardb_fdw OPTIONS ( + config_file_path '/path/to/scalardb.properties' +); +``` + +#### 4. Create user mapping + +To create user mapping, run the following command: + +```sql +CREATE USER MAPPING FOR PUBLIC SERVER scalardb; +``` + +#### 5. 
Create a foreign table
+
+To create a foreign table, run the following command:
+
+```sql
+CREATE FOREIGN TABLE sample_table (
+    pk int,
+    ck1 int,
+    ck2 int,
+    boolean_col boolean,
+    bigint_col bigint,
+    float_col double precision,
+    double_col double precision,
+    text_col text,
+    blob_col bytea
+) SERVER scalardb OPTIONS (
+    namespace 'ns',
+    table_name 'sample_table'
+);
+```
+
+#### 6. Run a query
+
+To run a query, run the following command:
+
+```sql
+select * from sample_table;
+```
+
+### Available options
+
+You can set the following options for ScalarDB FDW objects.
+
+#### `CREATE SERVER`
+
+You can set the following options on a ScalarDB foreign server object:
+
+| Name               | Required | Type     | Description                                                      |
+| ------------------ | -------- | -------- | ---------------------------------------------------------------- |
+| `config_file_path` | **Yes**  | `string` | The path to the ScalarDB config file.                            |
+| `max_heap_size`    | No       | `string` | The maximum heap size of JVM. The format is the same as `-Xmx`.  |
+
+#### `CREATE USER MAPPING`
+
+Currently, no options exist for `CREATE USER MAPPING`.
+
+#### `CREATE FOREIGN TABLE`
+
+The following options can be set on a ScalarDB foreign table object:
+
+| Name         | Required | Type     | Description                                                       |
+| ------------ | -------- | -------- | ----------------------------------------------------------------- |
+| `namespace`  | **Yes**  | `string` | The name of the namespace of the table in the ScalarDB instance.  |
+| `table_name` | **Yes**  | `string` | The name of the table in the ScalarDB instance.                   |
+
+### Data-type mapping
+
+| ScalarDB | PostgreSQL       |
+| -------- | ---------------- |
+| BOOLEAN  | boolean          |
+| INT      | int              |
+| BIGINT   | bigint           |
+| FLOAT    | float            |
+| DOUBLE   | double precision |
+| TEXT     | text             |
+| BLOB     | bytea            |
+
+## Testing
+
+This section describes how to test FDW for ScalarDB.
+
+### Set up a ScalarDB instance for testing
+
+Before testing FDW for ScalarDB, you must have a running ScalarDB instance that contains test data. You can set up the instance and load the test data by running the following command:
+
+```console
+./test/setup.sh
+```
+
+If you want to reset the instances, you can run the following command and then run the above setup command again:
+
+```console
+./test/cleanup.sh
+```
+
+### Run regression tests
+
+You can run regression tests by running the following command **after** you have installed the FDW extension.
+
+```console
+make installcheck
+```
+
+## Limitations
+
+- This extension aims to enable analytical query processing on ScalarDB-managed databases. Therefore, this extension only supports reading data from ScalarDB.
diff --git a/docs/3.9/scalardb-analytics-postgresql/schema-importer.md b/docs/3.9/scalardb-analytics-postgresql/schema-importer.md
new file mode 100644
index 00000000..d25ae5c3
--- /dev/null
+++ b/docs/3.9/scalardb-analytics-postgresql/schema-importer.md
@@ -0,0 +1,60 @@
+# Schema Importer
+
+Schema Importer is a CLI tool for automatically configuring PostgreSQL. By using this tool, your PostgreSQL database can have database objects, such as namespaces and tables, that are identical to the ones in your ScalarDB instance.
+
+Schema Importer reads the ScalarDB configuration file, retrieves the schemas of the tables defined in ScalarDB, and creates the corresponding foreign data wrapper external tables and views in that order. For more information, refer to [Getting Started with ScalarDB Analytics with PostgreSQL](getting-started.md).
+
+## Build Schema Importer
+
+You can build Schema Importer by using [Gradle](https://gradle.org/). To build Schema Importer, run the following command:
+
+```console
+./gradlew build
+```
+
+You may want to build a fat JAR file so that you can launch Schema Importer by using `java -jar`. To build the fat JAR, run the following command:
+
+```console
+./gradlew shadowJar
+```
+
+After you build the fat JAR, you can find the fat JAR file in the `app/build/libs/` directory.
+
+## Run Schema Importer
+
+To run Schema Importer by using the fat JAR file, run the following command:
+
+```console
+java -jar 
+```
+
+Available options are as follows:
+
+| Name                        | Required | Description                                                                                                                                   | Default                                    |
+| --------------------------- | -------- | --------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
+| `--config`                  | **Yes**  | Path to the ScalarDB configuration file                                                                                                       |                                            |
+| `--config-on-postgres-host` | No       | Path to the ScalarDB configuration file on the PostgreSQL-running host                                                                        | The same value as `--config` will be used. |
+| `--namespace`, `-n`         | **Yes**  | Namespaces to import into the analytics instance. You can specify the `--namespace` option multiple times if you have two or more namespaces. |                                            |
+| `--host`                    | No       | PostgreSQL host                                                                                                                               | localhost                                  |
+| `--port`                    | No       | PostgreSQL port                                                                                                                               | 5432                                       |
+| `--database`                | No       | PostgreSQL database name                                                                                                                      | postgres                                   |
+| `--user`                    | No       | PostgreSQL user                                                                                                                               | postgres                                   |
+| `--password`                | No       | PostgreSQL password                                                                                                                           |                                            |
+| `--debug`                   | No       | Enable debug mode                                                                                                                             |                                            |
+
+## Test Schema Importer
+
+To test Schema Importer, run the following command:
+
+```console
+./gradlew test
+```
+
+## Build a Docker image of Schema Importer
+
+To build a Docker image of Schema Importer, run the following command, replacing `` with the tag version of Schema Importer that you want to use:
+
+```console
+docker build -t ghcr.io/scalar-labs/scalardb-analytics-postgresql-schema-importer: -f ./app/Dockerfile .
+```
diff --git a/docs/3.9/schema-loader.md b/docs/3.9/schema-loader.md
index c20dcc4d..7c981fc9 100644
--- a/docs/3.9/schema-loader.md
+++ b/docs/3.9/schema-loader.md
@@ -1,50 +1,66 @@
 # ScalarDB Schema Loader
 
-ScalarDB has its own data model and schema, that maps to the implementation specific data model and schema.
-Also, it stores internal metadata (e.g., transaction ID, record version, transaction status) for managing transaction logs and statuses when you use the Consensus Commit transaction manager.
-It is a little hard for application developers to manage the schema mapping and metadata for transactions, so we offer a tool called ScalarDB Schema Loader for creating schema without requiring much knowledge about those.
+ScalarDB has its own data model and schema that maps to the implementation-specific data model and schema. In addition, ScalarDB stores internal metadata, such as transaction IDs, record versions, and transaction statuses, to manage transaction logs and statuses when you use the Consensus Commit transaction manager.
 
-There are two ways to specify general CLI options in Schema Loader:
- - Pass a ScalarDB configuration file and database/storage-specific options additionally.
 - - Pass the options without a ScalarDB configuration (Deprecated).
+Since managing the schema mapping and metadata for transactions can be difficult, you can use ScalarDB Schema Loader, which is a tool to create schemas that doesn't require you to have in-depth knowledge about schema mapping or metadata.
 
-Note that this tool supports only basic options to create/delete/repair/alter a table. If you want
-to use the advanced features of a database, please alter your tables with a database specific tool after creating them with this tool.
+You have two options to specify general CLI options in Schema Loader:
 
-# Usage
+- Pass the ScalarDB properties file and database-specific or storage-specific options.
+- Pass database-specific or storage-specific options without the ScalarDB properties file. (Deprecated)
 
-## Install
+{% capture notice--info %}
+**Note**
 
-The release versions of `schema-loader` can be downloaded from [releases](https://github.com/scalar-labs/scalardb/releases) page of ScalarDB.
+This tool supports only basic options to create, delete, repair, or alter a table. If you want to use the advanced features of a database, you must alter your tables with a database-specific tool after creating the tables with this tool.
+{% endcapture %}
 
-## Build
+
{{ notice--info | markdownify }}
-In case you want to build `schema-loader` from the source: -```console -$ ./gradlew schema-loader:shadowJar -``` -- The built fat jar file is `schema-loader/build/libs/scalardb-schema-loader-.jar` +## Set up Schema Loader -## Docker +Select your preferred method to set up Schema Loader, and follow the instructions. -You can pull the docker image from [Scalar's container registry](https://github.com/orgs/scalar-labs/packages/container/package/scalardb-schema-loader). -```console -docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader: -``` -- Note that you can specify the same command arguments even if you use the fat jar or the container. The example commands in the next section are shown with a jar, but you can run the commands with the container in the same way by replacing `java -jar scalardb-schema-loader-.jar` with `docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader:`. +
+
+ + +
+ +
+ +You can download the release versions of Schema Loader from the [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases) page. +
+
+ +You can pull the Docker image from the [Scalar container registry](https://github.com/orgs/scalar-labs/packages/container/package/scalardb-schema-loader) by running the following command, replacing the contents in the angle brackets as described: -You can also build the docker image as follows. ```console -$ ./gradlew schema-loader:docker +$ docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader: ``` -## Run +{% capture notice--info %} +**Note** + +You can specify the same command arguments even if you use the fat JAR or the container. In the [Available commands](#available-commands) section, the JAR is used, but you can run the commands by using the container in the same way by replacing `java -jar scalardb-schema-loader-.jar` with `docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader:`. +{% endcapture %} + +
{{ notice--info | markdownify }}
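+
+For example, assuming hypothetical host paths and a stand-in version tag, a complete invocation that mounts a properties file and a schema file into the container might look like the following:
+
+```console
+$ docker run --rm -v "$(pwd)/database.properties:/database.properties" -v "$(pwd)/schema.json:/schema.json" ghcr.io/scalar-labs/scalardb-schema-loader:3.9.0 --config /database.properties -f /schema.json --coordinator
+```
+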
+
+
+ +## Run Schema Loader + +This section explains how to run Schema Loader. ### Available commands -For using a config file: +Select how you would like to configure Schema Loader for your database. The preferred method is to use the properties file since other, database-specific methods are deprecated. + +The following commands are available when using the properties file: + ```console -Usage: java -jar scalardb-schema-loader-.jar [-D] [--coordinator] +Usage: java -jar scalardb-schema-loader-.jar [-D] [--coordinator] [--no-backup] [--no-scaling] -c= [--compaction-strategy=] [-f=] [--replication-factor=] @@ -60,7 +76,7 @@ Create/Delete schemas in the storage defined in the config file --compaction-strategy= The compaction strategy, must be LCS, STCS or TWCS (supported in Cassandra) - --coordinator Create/delete/repair coordinator tables + --coordinator Create/delete/repair Coordinator tables -D, --delete-all Delete tables -f, --schema-file= Path to the schema json file @@ -78,9 +94,57 @@ Create/Delete schemas in the storage defined in the config file --ru= Base resource unit (supported in DynamoDB, Cosmos DB) ``` -For Cosmos DB for NoSQL (Deprecated. Please use the command using a config file instead): +For a sample properties file, see [`database.properties`](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties). + +{% capture notice--info %} +**Note** + +The following database-specific methods have been deprecated. Please use the [commands for configuring the properties file](#available-commands) instead. + +
+
+ + + + +
+ +
+ +```console +Usage: java -jar scalardb-schema-loader-.jar --cassandra [-D] + [-c=] -f= -h= + [-n=] [-p=] [-P=] + [-R=] [-u=] +Create/Delete Cassandra schemas + -A, --alter Alter tables : it will add new columns and create/delete + secondary index for existing tables. It compares the + provided table schema to the existing schema to decide + which columns need to be added and which indexes need + to be created or deleted + -c, --compaction-strategy= + Cassandra compaction strategy, must be LCS, STCS or TWCS + -D, --delete-all Delete tables + -f, --schema-file= + Path to the schema json file + -h, --host= Cassandra host IP + -n, --network-strategy= + Cassandra network strategy, must be SimpleStrategy or + NetworkTopologyStrategy + -p, --password= + Cassandra password + -P, --port= Cassandra Port + -R, --replication-factor= + Cassandra replication factor + --repair-all Repair tables : it repairs the table metadata of + existing tables + -u, --user= Cassandra user +``` +
+
+ ```console -Usage: java -jar scalardb-schema-loader-.jar --cosmos [-D] +Usage: java -jar scalardb-schema-loader-.jar --cosmos [-D] [--no-scaling] -f= -h= -p= [-r=] Create/Delete Cosmos DB schemas -A, --alter Alter tables : it will add new columns and create/delete @@ -99,10 +163,11 @@ Create/Delete Cosmos DB schemas existing tables and repairs stored procedure attached to each table ``` +
+
-For DynamoDB (Deprecated. Please use the command using a config file instead): ```console -Usage: java -jar scalardb-schema-loader-.jar --dynamo [-D] +Usage: java -jar scalardb-schema-loader-.jar --dynamo [-D] [--no-backup] [--no-scaling] [--endpoint-override=] -f= -p= [-r=] --region= -u= @@ -127,41 +192,11 @@ Create/Delete DynamoDB schemas existing tables -u, --user= AWS access key ID ``` +
+
-For Cassandra (Deprecated. Please use the command using a config file instead): ```console -Usage: java -jar scalardb-schema-loader-.jar --cassandra [-D] - [-c=] -f= -h= - [-n=] [-p=] [-P=] - [-R=] [-u=] -Create/Delete Cassandra schemas - -A, --alter Alter tables : it will add new columns and create/delete - secondary index for existing tables. It compares the - provided table schema to the existing schema to decide - which columns need to be added and which indexes need - to be created or deleted - -c, --compaction-strategy= - Cassandra compaction strategy, must be LCS, STCS or TWCS - -D, --delete-all Delete tables - -f, --schema-file= - Path to the schema json file - -h, --host= Cassandra host IP - -n, --network-strategy= - Cassandra network strategy, must be SimpleStrategy or - NetworkTopologyStrategy - -p, --password= - Cassandra password - -P, --port= Cassandra Port - -R, --replication-factor= - Cassandra replication factor - --repair-all Repair tables : it repairs the table metadata of - existing tables - -u, --user= Cassandra user -``` - -For a JDBC database (Deprecated. Please use the command using a config file instead): -```console -Usage: java -jar scalardb-schema-loader-.jar --jdbc [-D] +Usage: java -jar scalardb-schema-loader-.jar --jdbc [-D] -f= -j= -p= -u= Create/Delete JDBC schemas -A, --alter Alter tables : it will add new columns and create/delete @@ -179,142 +214,234 @@ Create/Delete JDBC schemas existing tables -u, --user= JDBC user ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
### Create namespaces and tables -For using a config file (Sample config file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties)): +To create namespaces and tables by using a properties file, run the following command, replacing the contents in the angle brackets as described: + ```console -$ java -jar scalardb-schema-loader-.jar --config -f schema.json [--coordinator] +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] ``` - - if `--coordinator` is specified, the coordinator tables will be created. -For using CLI arguments fully for configuration (Deprecated. Please use the command using a config file instead): +If `--coordinator` is specified, a [Coordinator table](api-guide.md#specify-operations-for-the-coordinator-table) will be created. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+
```console
-# For Cosmos DB for NoSQL
-$ java -jar scalardb-schema-loader-.jar --cosmos -h  -p  -f schema.json [-r BASE_RESOURCE_UNIT]
+$ java -jar scalardb-schema-loader-.jar --cassandra -h  [-P ] [-u ] [-p ] -f  [-n ] [-R ]
```
- - `` you can use a primary key or a secondary key.
- - `-r BASE_RESOURCE_UNIT` is an option. You can specify the RU of each database. The maximum RU in tables in the database will be set. If you don't specify RU of tables, the database RU will be set with this option. By default, it's 400.
+
+- If `-P ` is not supplied, it defaults to `9042`.
+- If `-u ` is not supplied, it defaults to `cassandra`.
+- If `-p ` is not supplied, it defaults to `cassandra`.
+- The network strategy should be either `SimpleStrategy` or `NetworkTopologyStrategy`.
+
+
```console
-# For DynamoDB
-$ java -jar scalardb-schema-loader-.jar --dynamo -u  -p  --region  -f schema.json [-r BASE_RESOURCE_UNIT]
+$ java -jar scalardb-schema-loader-.jar --cosmos -h  -p  -f  [-r BASE_RESOURCE_UNIT]
```
- - `` should be a string to specify an AWS region like `ap-northeast-1`.
- - `-r` option is almost the same as Cosmos DB for NoSQL option. However, the unit means DynamoDB capacity unit. The read and write capacity units are set the same value.
+
+- For the key, you can use either a primary key or a secondary key.
+- `-r BASE_RESOURCE_UNIT` is optional and specifies the RU of each database. The maximum RU among the tables in the database will be set. If you don't specify the RU of tables, the database RU will be set with this option. By default, the value is `400`.
+
+
```console
-# For Cassandra
-$ java -jar scalardb-schema-loader-.jar --cassandra -h  [-P ] [-u ] [-p ] -f schema.json [-n ] [-R ]
+$ java -jar scalardb-schema-loader-.jar --dynamo -u  -p  --region  -f  [-r BASE_RESOURCE_UNIT]
```
- - If `-P ` is not supplied, it defaults to `9042`.
- - If `-u ` is not supplied, it defaults to `cassandra`.
- - If `-p ` is not supplied, it defaults to `cassandra`.
- - `` should be `SimpleStrategy` or `NetworkTopologyStrategy`
+
+- The region should be a string that specifies an AWS region, like `ap-northeast-1`.
+- The `-r` option is almost the same as the Cosmos DB for NoSQL option. However, in this case, the unit means DynamoDB capacity units, and the read and write capacity units are set to the same value.
+
+
```console -# For a JDBC database -$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f schema.json +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
### Alter tables
 
-This command will add new columns and create/delete secondary index for existing tables. It compares
-the provided table schema to the existing schema to decide which columns need to be added and which
-indexes need to be created or deleted.
+You can use a command to add new columns to and create or delete a secondary index for existing tables. This command compares the provided table schema to the existing schema to decide which columns need to be added and which indexes need to be created or deleted.
 
-For using config file (Sample config file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties)):
+To add new columns to and create or delete a secondary index for existing tables, run the following command, replacing the contents in the angle brackets as described:
 
```console
-$ java -jar scalardb-schema-loader-.jar --config  -f schema.json --alter
+$ java -jar scalardb-schema-loader-.jar --config  -f  --alter
```
 
-For using CLI arguments fully for configuration (Deprecated. Please use the command using a config
-file instead):
+{% capture notice--info %}
+**Note**
+
+The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead.
+
+
+ + + + +
+ +
```console -# For Cosmos DB for NoSQL -$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f schema.json --alter +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f --alter ``` +
+
```console -# For DynamoDB -$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f schema.json --alter +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f --alter ``` +
+
```console -# For Cassandra -$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f schema.json --alter +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f --alter ``` +
+
```console -# For a JDBC database -$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f schema.json --alter +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f --alter ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
### Delete tables -For using config file (Sample config file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties)): +You can delete tables by using the properties file. To delete tables, run the following command, replacing the contents in the angle brackets as described: + ```console -$ java -jar scalardb-schema-loader-.jar --config -f schema.json [--coordinator] -D +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] -D ``` - - if `--coordinator` is specified, the coordinator tables will be deleted. - -For using CLI arguments fully for configuration (Deprecated. Please use the command using a config file instead): + +If `--coordinator` is specified, the Coordinator table will be deleted as well. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ ```console -# For Cosmos DB for NoSQL -$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f schema.json -D +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f -D ``` +
+
```console -# For DynamoDB -$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f schema.json -D +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f -D ``` +
+
```console -# For Cassandra -$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f schema.json -D +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f -D ``` +
+
```console -# For a JDBC database -$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f schema.json -D +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f -D ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
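+
+For example, with a hypothetical properties file named `database.properties`, a schema file named `schema.json`, and a stand-in version number of `3.9.0`, deleting the tables and the Coordinator table might look like the following:
+
+```console
+$ java -jar scalardb-schema-loader-3.9.0.jar --config database.properties -f schema.json --coordinator -D
+```
+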
### Repair tables -This command will repair the table metadata of existing tables. When using Cosmos DB for NoSQL, it additionally repairs stored procedure attached to each table. +You can repair the table metadata of existing tables by using the properties file. To repair table metadata of existing tables, run the following command, replacing the contents in the angle brackets as described: -For using config file (Sample config file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties)): ```console -$ java -jar scalardb-schema-loader-.jar --config -f schema.json [--coordinator] --repair-all +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] --repair-all ``` -- if `--coordinator` is specified, the coordinator tables will be repaired as well. -For using CLI arguments fully for configuration (Deprecated. Please use the command using a config file instead): +If `--coordinator` is specified, the Coordinator table will be repaired as well. In addition, if you're using Cosmos DB for NoSQL, running this command will also repair stored procedures attached to each table. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ ```console -# For Cosmos DB for NoSQL -$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f schema.json --repair-all +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f --repair-all ``` +
+
```console -# For DynamoDB -$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region [--no-backup] -f schema.json --repair-all +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f --repair-all ``` +
+
```console -# For Cassandra -$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f schema.json --repair-all +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region [--no-backup] -f --repair-all ``` +
+
```console -# For a JDBC database -$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f schema.json --repair-all +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f --repair-all ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
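+
+For example, using the same kind of hypothetical file names and version number, repairing the tables and the Coordinator table might look like the following:
+
+```console
+$ java -jar scalardb-schema-loader-3.9.0.jar --config database.properties -f schema.json --coordinator --repair-all
+```
+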
### Sample schema file -The sample schema is as follows (Sample schema file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/schema-loader/sample/schema_sample.json)): +The following is a sample schema. For a sample schema file, see [`schema_sample.json`](https://github.com/scalar-labs/scalardb/blob/master/schema-loader/sample/schema_sample.json). ```json { @@ -379,14 +506,17 @@ The sample schema is as follows (Sample schema file can be found [here](https:// ``` The schema has table definitions that include `columns`, `partition-key`, `clustering-key`, `secondary-index`, and `transaction` fields. -The `columns` field defines columns of the table and their data types. -The `partition-key` field defines which columns the partition key is composed of, and `clustering-key` defines which columns the clustering key is composed of. -The `secondary-index` field defines which columns are indexed. -The `transaction` field indicates whether the table is for transactions or not. -If you set the `transaction` field to `true` or don't specify the `transaction` field, this tool creates a table with transaction metadata if needed. -If not, it creates a table without any transaction metadata (that is, for a table with [Storage API](storage-abstraction.md)). - -You can also specify database/storage-specific options in the table definition as follows: + +- The `columns` field defines columns of the table and their data types. +- The `partition-key` field defines which columns the partition key is composed of. +- The `clustering-key` field defines which columns the clustering key is composed of. +- The `secondary-index` field defines which columns are indexed. +- The `transaction` field indicates whether the table is for transactions or not. + - If you set the `transaction` field to `true` or don't specify the `transaction` field, this tool creates a table with transaction metadata if needed. + - If you set the `transaction` field to `false`, this tool creates a table without any transaction metadata (that is, for a table with [Storage API](storage-abstraction.md)). + +You can also specify database or storage-specific options in the table definition as follows: + ```json { "sample_db.sample_table3": { @@ -404,30 +534,68 @@ You can also specify database/storage-specific options in the table definition a } ``` -The database/storage-specific options you can specify are as follows: +The database or storage-specific options you can specify are as follows: -For Cassandra: -- `compaction-strategy`, a compaction strategy. It should be `STCS` (SizeTieredCompaction), `LCS` (LeveledCompactionStrategy) or `TWCS` (TimeWindowCompactionStrategy). +
+
+ + + + +
-For DynamoDB and Cosmos DB for NoSQL: -- `ru`, a request unit. Please see [RU](#ru) for the details. +
-## Scaling Performance +The `compaction-strategy` option is the compaction strategy used. This option should be `STCS` (SizeTieredCompaction), `LCS` (LeveledCompactionStrategy), or `TWCS` (TimeWindowCompactionStrategy). +
+
-### RU +The `ru` option stands for Request Units. For details, see [RUs](#rus). +
+
-You can scale the throughput of Cosmos DB for NoSQL and DynamoDB by specifying `--ru` option (which applies to all the tables) or `ru` parameter for each table. The default values are `400` for Cosmos DB for NoSQL and `10` for DynamoDB respectively, which are set without `--ru` option. +The `ru` option stands for Request Units. For details, see [RUs](#rus). +
+
-Note that the schema loader abstracts [Request Unit](https://docs.microsoft.com/azure/cosmos-db/request-units) of Cosmos DB for NoSQL and [Capacity Unit](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) of DynamoDB with `RU`. -So, please set an appropriate value depending on the database implementations. Please also note that the schema loader sets the same value to both Read Capacity Unit and Write Capacity Unit for DynamoDB. +No options are available for JDBC databases. +
+
+
+## Scale for performance when using Cosmos DB for NoSQL or DynamoDB
+
+When using Cosmos DB for NoSQL or DynamoDB, you can scale by using Request Units (RUs) or auto-scaling.
+
+### RUs
+
+You can scale the throughput of Cosmos DB for NoSQL and DynamoDB by specifying the `--ru` option, which applies to all tables, or by specifying the `ru` parameter for each table.
+
+If the `--ru` option is not set, the default values will be `400` for Cosmos DB for NoSQL and `10` for DynamoDB.
+
+{% capture notice--info %}
+**Note**
+
+- Schema Loader abstracts [Request Units](https://docs.microsoft.com/azure/cosmos-db/request-units) for Cosmos DB for NoSQL and [Capacity Units](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) for DynamoDB with `RU`. Therefore, be sure to set an appropriate value depending on the database implementation.
+- Be aware that Schema Loader sets the same value to both read capacity unit and write capacity unit for DynamoDB.
+{% endcapture %}
+
{{ notice--info | markdownify }}
### Auto-scaling
 
-By default, the schema loader enables auto-scaling of RU for all tables: RU is scaled in or out between 10% and 100% of a specified RU depending on a workload. For example, if you specify `-r 10000`, RU of each table is scaled in or out between 1000 and 10000. Note that auto-scaling of Cosmos DB for NoSQL is enabled only when you set more than or equal to 4000 RU.
+By default, Schema Loader enables auto-scaling of RUs for all tables: RUs scale between 10 percent and 100 percent of a specified RU depending on the workload. For example, if you specify `-r 10000`, the RUs of each table auto-scale between `1000` and `10000`.
 
-## Data type mapping between ScalarDB and the other databases
+{% capture notice--info %}
+**Note**
 
-Here are the supported data types in ScalarDB and their mapping to the data types of other databases.
+Auto-scaling for Cosmos DB for NoSQL is enabled only when this option is set to `4000` or more.
+{% endcapture %}
+
{{ notice--info | markdownify }}
+ +## Data-type mapping between ScalarDB and other databases + +The following table shows the supported data types in ScalarDB and their mapping to the data types of other databases. | ScalarDB | Cassandra | Cosmos DB for NoSQL | DynamoDB | MySQL | PostgreSQL | Oracle | SQL Server | SQLite | |-----------|-----------|---------------------|----------|----------|------------------|----------------|-----------------|---------| @@ -439,48 +607,47 @@ Here are the supported data types in ScalarDB and their mapping to the data type | TEXT | text | string (JSON) | S | longtext | text | varchar2(4000) | varchar(8000) | text | | BLOB | blob | string (JSON) | B | longblob | bytea | RAW(2000) | varbinary(8000) | blob | -However, the following types in JDBC databases are converted differently when they are used as a primary key or a secondary index key due to the limitations of RDB data types. +However, the following data types in JDBC databases are converted differently when they are used as a primary key or a secondary index key. This is due to the limitations of RDB data types. | ScalarDB | MySQL | PostgreSQL | Oracle | |----------|---------------|-------------------|--------------| | TEXT | VARCHAR(64) | VARCHAR(10485760) | VARCHAR2(64) | | BLOB | VARBINARY(64) | | RAW(64) | -The value range of `BIGINT` in ScalarDB is from -2^53 to 2^53 regardless of the underlying database. +The value range of `BIGINT` in ScalarDB is from -2^53 to 2^53, regardless of the underlying database. -If this data type mapping doesn't match your application, please alter the tables to change the data types after creating them with this tool. +If this data-type mapping doesn't match your application, please alter the tables to change the data types after creating them by using this tool. ## Internal metadata for Consensus Commit -The Consensus Commit transaction manager manages metadata (e.g., transaction ID, record version, transaction status) stored along with the actual records to handle transactions properly. -Thus, along with any required columns by the application, additional columns for the metadata need to be defined in the schema. -Additionaly, this tool creates a table with the metadata when you use the Consensus Commit transaction manager. +The Consensus Commit transaction manager manages metadata (for example, transaction ID, record version, and transaction status) stored along with the actual records to handle transactions properly. + +Thus, along with any columns that the application requires, additional columns for the metadata need to be defined in the schema. Additionally, this tool creates a table with the metadata if you use the Consensus Commit transaction manager. + +## Use Schema Loader in your application + +You can check the version of Schema Loader from the [Maven Central Repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb-schema-loader). For example in Gradle, you can add the following dependency to your `build.gradle` file, replacing `` with the version of Schema Loader that you want to use: -## Using Schema Loader in your program -You can check the version of `schema-loader` from [maven central repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb-schema-loader). -For example in Gradle, you can add the following dependency to your build.gradle. Please replace the `` with the version you want to use. 
```gradle
dependencies {
-    implementation 'com.scalar-labs:scalardb-schema-loader:'
+    implementation 'com.scalar-labs:scalardb-schema-loader:<VERSION>'
}
```

-### Create, alter, repair and delete
+### Create, alter, repair, or delete tables

-You can create, alter, delete and repair tables that are defined in the schema using SchemaLoader by
-simply passing ScalarDB configuration file, schema, and additional options if needed as shown
-below.
+You can create, alter, delete, or repair tables that are defined in the schema by using Schema Loader. To do this, you can pass a ScalarDB properties file, schema, and additional options, if needed, as shown below:

```java
public class SchemaLoaderSample {
  public static int main(String... args) throws SchemaLoaderException {
    Path configFilePath = Paths.get("database.properties");
-    // "sample_schema.json" and "altered_sample_schema.json" can be found in the "/sample" directory
+    // "sample_schema.json" and "altered_sample_schema.json" can be found in the "/sample" directory.
    Path schemaFilePath = Paths.get("sample_schema.json");
    Path alteredSchemaFilePath = Paths.get("altered_sample_schema.json");

-    boolean createCoordinatorTables = true; // whether to create the coordinator tables or not
-    boolean deleteCoordinatorTables = true; // whether to delete the coordinator tables or not
-    boolean repairCoordinatorTables = true; // whether to repair the coordinator tables or not
+    boolean createCoordinatorTables = true; // whether to create the Coordinator table or not
+    boolean deleteCoordinatorTables = true; // whether to delete the Coordinator table or not
+    boolean repairCoordinatorTables = true; // whether to repair the Coordinator table or not

    Map<String, String> tableCreationOptions = new HashMap<>();

@@ -499,16 +666,16 @@ public class SchemaLoaderSample {
    Map<String, String> tableReparationOptions = new HashMap<>();
    indexCreationOptions.put(DynamoAdmin.NO_BACKUP, "true");

-    // Create tables
+    // Create tables.
    SchemaLoader.load(configFilePath, schemaFilePath, tableCreationOptions, createCoordinatorTables);

-    // Alter tables
+    // Alter tables.
    SchemaLoader.alterTables(configFilePath, alteredSchemaFilePath, indexCreationOptions);

-    // Repair tables
+    // Repair tables.
    SchemaLoader.repairTables(configFilePath, schemaFilePath, tableReparationOptions, repairCoordinatorTables);

-    // Delete tables
+    // Delete tables.
    SchemaLoader.unload(configFilePath, schemaFilePath, deleteCoordinatorTables);

    return 0;
@@ -516,33 +683,34 @@ public class SchemaLoaderSample {
  }
}
```

-You can also create, delete or repair a schema by passing a serialized schema JSON string (the raw text of a schema file).
+You can also create, delete, or repair a schema by passing a serialized-schema JSON string (the raw text of a schema file) as shown below:
+
```java
-// Create tables
+// Create tables.
SchemaLoader.load(configFilePath, serializedSchemaJson, tableCreationOptions, createCoordinatorTables);

-// Alter tables
+// Alter tables.
SchemaLoader.alterTables(configFilePath, serializedAlteredSchemaFilePath, indexCreationOptions);

-// Repair tables
+// Repair tables.
SchemaLoader.repairTables(configFilePath, serializedSchemaJson, tableReparationOptions, repairCoordinatorTables);

-// Delete tables
+// Delete tables.
SchemaLoader.unload(configFilePath, serializedSchemaJson, deleteCoordinatorTables);
```

-For ScalarDB configuration, a `Properties` object can be used as well.
+When configuring ScalarDB, you can use a `Properties` object as well, as shown below:

```java
-// Create tables
+// Create tables.
SchemaLoader.load(properties, serializedSchemaJson, tableCreationOptions, createCoordinatorTables); -// Alter tables +// Alter tables. SchemaLoader.alterTables(properties, serializedAlteredSchemaFilePath, indexCreationOptions); -// Repair tables +// Repair tables. SchemaLoader.repairTables(properties, serializedSchemaJson, tableReparationOptions, repairCoordinatorTables); -// Delete tables +// Delete tables. SchemaLoader.unload(properties, serializedSchemaJson, deleteCoordinatorTables); ``` diff --git a/docs/latest/scalardb-analytics-postgresql/scalardb-fdw.md b/docs/latest/scalardb-analytics-postgresql/scalardb-fdw.md new file mode 100644 index 00000000..78fe9787 --- /dev/null +++ b/docs/latest/scalardb-analytics-postgresql/scalardb-fdw.md @@ -0,0 +1,174 @@ +# ScalarDB FDW + +ScalarDB FDW is a PostgreSQL extension that implements a foreign data wrapper (FDW) for [ScalarDB](https://www.scalar-labs.com/scalardb/). + +ScalarDB FDW uses the Java Native Interface to directly utilize ScalarDB as a library inside the FDW and read data from external databases via scan operations for ScalarDB. + +## Prerequisites + +You must have the following prerequisites set up in your environment. + +### JDK + +You must install a version of the Java Development Kit (JDK) that is compatible with ScalarDB. In addition, you must set the `JAVA_HOME` environment variable, which points to your JDK installation directory. + +Note that since these extensions use the Java Native Interface (JNI) internally, you must include the dynamic library of the Java virtual machine (JVM), such as `libjvm.so`, in the library search path. + +### PostgreSQL + +This extension supports PostgreSQL 13 or later. For details on how to install PostgreSQL, see the official documentation at [Server Administration](https://www.postgresql.org/docs/current/admin.html). + +## Build and installation + +You can build and install this extension by running the following command. + +```console +make install +``` + +### Common build errors + +This section describes some common build errors that you might encounter. + +#### ld: library not found for -ljvm + +Normally, the build script finds the path for `libjvm.so` and properly sets it as a library search path. However, if you encounter the error `ld: library not found for -ljvm`, please copy the `libjvm.so` file to the default library search path. For example: + +```console +ln -s //libjvm.so /usr/lib64/libjvm.so +``` + +## Usage + +This section provides a usage example and available options for FDW for ScalarDB. + +### Example + +The following example shows you how to install and create the necessary components, and then run a query by using the FDW extension. + +#### 1. Install the extension + +For details on how to install the extension, see the [Build and installation](#build-and-installation) section. + +#### 2. Create an extension + +To create an extension, run the following command: + +```sql +CREATE EXTENSION scalardb_fdw; +``` + +#### 3. Create a foreign server + +To create a foreign server, run the following command: + +```sql +CREATE SERVER scalardb FOREIGN DATA WRAPPER scalardb_fdw OPTIONS ( + config_file_path '/path/to/scalardb.properties' +); +``` + +#### 4. Create user mapping + +To create user mapping, run the following command: + +```sql +CREATE USER MAPPING FOR PUBLIC SERVER scalardb; +``` + +#### 5. 
Create a foreign table
+
+To create a foreign table, run the following command:
+
+```sql
+CREATE FOREIGN TABLE sample_table (
+    pk int,
+    ck1 int,
+    ck2 int,
+    boolean_col boolean,
+    bigint_col bigint,
+    float_col double precision,
+    double_col double precision,
+    text_col text,
+    blob_col bytea
+) SERVER scalardb OPTIONS (
+    namespace 'ns',
+    table_name 'sample_table'
+);
+```
+
+#### 6. Run a query
+
+To run a query, run the following command:
+
+```sql
+select * from sample_table;
+```
+
+### Available options
+
+You can set the following options for ScalarDB FDW objects.
+
+#### `CREATE SERVER`
+
+You can set the following options on a ScalarDB foreign server object:
+
+| Name               | Required | Type     | Description                                                         |
+| ------------------ | -------- | -------- | ------------------------------------------------------------------- |
+| `config_file_path` | **Yes**  | `string` | The path to the ScalarDB config file.                               |
+| `max_heap_size`    | No       | `string` | The maximum heap size of the JVM. The format is the same as `-Xmx`. |
+
+#### `CREATE USER MAPPING`
+
+Currently, no options exist for `CREATE USER MAPPING`.
+
+#### `CREATE FOREIGN TABLE`
+
+The following options can be set on a ScalarDB foreign table object:
+
+| Name         | Required | Type     | Description                                                       |
+| ------------ | -------- | -------- | ----------------------------------------------------------------- |
+| `namespace`  | **Yes**  | `string` | The name of the namespace of the table in the ScalarDB instance.  |
+| `table_name` | **Yes**  | `string` | The name of the table in the ScalarDB instance.                   |
+
+### Data-type mapping
+
+| ScalarDB | PostgreSQL       |
+| -------- | ---------------- |
+| BOOLEAN  | boolean          |
+| INT      | int              |
+| BIGINT   | bigint           |
+| FLOAT    | float            |
+| DOUBLE   | double precision |
+| TEXT     | text             |
+| BLOB     | bytea            |
+
+## Testing
+
+This section describes how to test FDW for ScalarDB.
+
+### Set up a ScalarDB instance for testing
+
+Before testing FDW for ScalarDB, you must have a running ScalarDB instance that contains test data. You can set up the instance and load the test data by running the following command:
+
+```console
+./test/setup.sh
+```
+
+If you want to reset the instance, run the following command and then run the setup command above again:
+
+```console
+./test/cleanup.sh
+```
+
+### Run regression tests
+
+You can run regression tests by running the following command **after** you have installed the FDW extension.
+
+```console
+make installcheck
+```
+
+## Limitations
+
+- This extension aims to enable analytical query processing on ScalarDB-managed databases. Therefore, this extension only supports reading data from ScalarDB.
diff --git a/docs/latest/scalardb-analytics-postgresql/schema-importer.md b/docs/latest/scalardb-analytics-postgresql/schema-importer.md
new file mode 100644
index 00000000..d25ae5c3
--- /dev/null
+++ b/docs/latest/scalardb-analytics-postgresql/schema-importer.md
@@ -0,0 +1,60 @@
+# Schema Importer
+
+Schema Importer is a CLI tool for automatically configuring PostgreSQL. By using this tool, your PostgreSQL database can have the same database objects, such as namespaces and tables, as your ScalarDB instance.
+
+Schema Importer reads the ScalarDB configuration file, retrieves the schemas of the tables defined in ScalarDB, and creates the corresponding foreign data wrapper external tables and views in that order. For more information, refer to [Getting Started with ScalarDB Analytics with PostgreSQL](getting-started.md).
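+
+As a rough sketch of what this produces, importing a hypothetical ScalarDB table `ns.sample_table` leaves you with a pair of PostgreSQL objects along the following lines. The column list reuses the sample table from the ScalarDB FDW documentation, and the exact object names and view definitions that Schema Importer generates may differ:
+
+```sql
+-- Hypothetical example: a foreign table that maps the underlying ScalarDB table through ScalarDB FDW.
+CREATE FOREIGN TABLE ns._sample_table (
+    pk int,
+    ck1 int,
+    ck2 int,
+    text_col text
+    -- ...plus the remaining application columns and the transaction metadata columns.
+) SERVER scalardb OPTIONS (
+    namespace 'ns',
+    table_name 'sample_table'
+);
+
+-- A view that hides the internal metadata so that analytical queries see only application columns.
+CREATE VIEW ns.sample_table AS
+    SELECT pk, ck1, ck2, text_col FROM ns._sample_table;
+```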
+
+## Build Schema Importer
+
+You can build Schema Importer by using [Gradle](https://gradle.org/). To build Schema Importer, run the following command:
+
+```console
+./gradlew build
+```
+
+You may want to build a fat JAR file so that you can launch Schema Importer by using `java -jar`. To build the fat JAR, run the following command:
+
+   ```console
+   ./gradlew shadowJar
+   ```
+
+After you build the fat JAR, you can find the fat JAR file in the `app/build/libs/` directory.
+
+## Run Schema Importer
+
+To run Schema Importer by using the fat JAR file, run the following command:
+
+```console
+java -jar 
+```
+
+Available options are as follows:
+
+| Name                        | Required | Description                                                                                                                                   | Default                                    |
+| --------------------------- | -------- | --------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
+| `--config`                  | **Yes**  | Path to the ScalarDB configuration file                                                                                                       |                                            |
+| `--config-on-postgres-host` | No       | Path to the ScalarDB configuration file on the PostgreSQL-running host                                                                        | The same value as `--config` will be used. |
+| `--namespace`, `-n`         | **Yes**  | Namespaces to import into the analytics instance. You can specify the `--namespace` option multiple times if you have two or more namespaces. |                                            |
+| `--host`                    | No       | PostgreSQL host                                                                                                                               | localhost                                  |
+| `--port`                    | No       | PostgreSQL port                                                                                                                               | 5432                                       |
+| `--database`                | No       | PostgreSQL database name                                                                                                                      | postgres                                   |
+| `--user`                    | No       | PostgreSQL user                                                                                                                               | postgres                                   |
+| `--password`                | No       | PostgreSQL password                                                                                                                           |                                            |
+| `--debug`                   | No       | Enable debug mode                                                                                                                             |                                            |
+
+## Test Schema Importer
+
+To test Schema Importer, run the following command:
+
+```console
+./gradlew test
+```
+
+## Build a Docker image of Schema Importer
+
+To build a Docker image of Schema Importer, run the following command, replacing `` with the tag version of Schema Importer that you want to use:
+
+```console
+docker build -t ghcr.io/scalar-labs/scalardb-analytics-postgresql-schema-importer: -f ./app/Dockerfile .
+```
diff --git a/docs/latest/schema-loader.md b/docs/latest/schema-loader.md
index c20dcc4d..7c981fc9 100644
--- a/docs/latest/schema-loader.md
+++ b/docs/latest/schema-loader.md
@@ -1,50 +1,66 @@
 # ScalarDB Schema Loader
 
-ScalarDB has its own data model and schema, that maps to the implementation specific data model and schema.
-Also, it stores internal metadata (e.g., transaction ID, record version, transaction status) for managing transaction logs and statuses when you use the Consensus Commit transaction manager.
-It is a little hard for application developers to manage the schema mapping and metadata for transactions, so we offer a tool called ScalarDB Schema Loader for creating schema without requiring much knowledge about those.
+ScalarDB has its own data model and schema that maps to the implementation-specific data model and schema. In addition, ScalarDB stores internal metadata, such as transaction IDs, record versions, and transaction statuses, to manage transaction logs and statuses when you use the Consensus Commit transaction manager.

-There are two ways to specify general CLI options in Schema Loader:
- - Pass a ScalarDB configuration file and database/storage-specific options additionally.
- - Pass the options without a ScalarDB configuration (Deprecated).
+Since managing the schema mapping and metadata for transactions can be difficult, you can use ScalarDB Schema Loader, which is a tool that creates schemas without requiring in-depth knowledge about schema mapping or metadata.

-Note that this tool supports only basic options to create/delete/repair/alter a table. If you want
-to use the advanced features of a database, please alter your tables with a database specific tool after creating them with this tool.
+You have two options to specify general CLI options in Schema Loader:

-# Usage
+- Pass the ScalarDB properties file and database-specific or storage-specific options.
+- Pass database-specific or storage-specific options without the ScalarDB properties file. (Deprecated)

-## Install
+{% capture notice--info %}
+**Note**

-The release versions of `schema-loader` can be downloaded from [releases](https://github.com/scalar-labs/scalardb/releases) page of ScalarDB.
+This tool supports only basic options to create, delete, repair, or alter a table. If you want to use the advanced features of a database, you must alter your tables with a database-specific tool after creating the tables with this tool.
+{% endcapture %}
{{ notice--info | markdownify }}
-In case you want to build `schema-loader` from the source: -```console -$ ./gradlew schema-loader:shadowJar -``` -- The built fat jar file is `schema-loader/build/libs/scalardb-schema-loader-.jar` +## Set up Schema Loader -## Docker +Select your preferred method to set up Schema Loader, and follow the instructions. -You can pull the docker image from [Scalar's container registry](https://github.com/orgs/scalar-labs/packages/container/package/scalardb-schema-loader). -```console -docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader: -``` -- Note that you can specify the same command arguments even if you use the fat jar or the container. The example commands in the next section are shown with a jar, but you can run the commands with the container in the same way by replacing `java -jar scalardb-schema-loader-.jar` with `docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader:`. +
+
+ + +
+ +
+ +You can download the release versions of Schema Loader from the [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases) page. +
+
+ +You can pull the Docker image from the [Scalar container registry](https://github.com/orgs/scalar-labs/packages/container/package/scalardb-schema-loader) by running the following command, replacing the contents in the angle brackets as described: -You can also build the docker image as follows. ```console -$ ./gradlew schema-loader:docker +$ docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader: ``` -## Run +{% capture notice--info %} +**Note** + +You can specify the same command arguments even if you use the fat JAR or the container. In the [Available commands](#available-commands) section, the JAR is used, but you can run the commands by using the container in the same way by replacing `java -jar scalardb-schema-loader-.jar` with `docker run --rm -v : [-v :] ghcr.io/scalar-labs/scalardb-schema-loader:`. +{% endcapture %} + +
{{ notice--info | markdownify }}
+
+
+ +## Run Schema Loader + +This section explains how to run Schema Loader. ### Available commands -For using a config file: +Select how you would like to configure Schema Loader for your database. The preferred method is to use the properties file since other, database-specific methods are deprecated. + +The following commands are available when using the properties file: + ```console -Usage: java -jar scalardb-schema-loader-.jar [-D] [--coordinator] +Usage: java -jar scalardb-schema-loader-.jar [-D] [--coordinator] [--no-backup] [--no-scaling] -c= [--compaction-strategy=] [-f=] [--replication-factor=] @@ -60,7 +76,7 @@ Create/Delete schemas in the storage defined in the config file --compaction-strategy= The compaction strategy, must be LCS, STCS or TWCS (supported in Cassandra) - --coordinator Create/delete/repair coordinator tables + --coordinator Create/delete/repair Coordinator tables -D, --delete-all Delete tables -f, --schema-file= Path to the schema json file @@ -78,9 +94,57 @@ Create/Delete schemas in the storage defined in the config file --ru= Base resource unit (supported in DynamoDB, Cosmos DB) ``` -For Cosmos DB for NoSQL (Deprecated. Please use the command using a config file instead): +For a sample properties file, see [`database.properties`](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties). + +{% capture notice--info %} +**Note** + +The following database-specific methods have been deprecated. Please use the [commands for configuring the properties file](#available-commands) instead. + +
+
+ + + + +
+ +
+ +```console +Usage: java -jar scalardb-schema-loader-.jar --cassandra [-D] + [-c=] -f= -h= + [-n=] [-p=] [-P=] + [-R=] [-u=] +Create/Delete Cassandra schemas + -A, --alter Alter tables : it will add new columns and create/delete + secondary index for existing tables. It compares the + provided table schema to the existing schema to decide + which columns need to be added and which indexes need + to be created or deleted + -c, --compaction-strategy= + Cassandra compaction strategy, must be LCS, STCS or TWCS + -D, --delete-all Delete tables + -f, --schema-file= + Path to the schema json file + -h, --host= Cassandra host IP + -n, --network-strategy= + Cassandra network strategy, must be SimpleStrategy or + NetworkTopologyStrategy + -p, --password= + Cassandra password + -P, --port= Cassandra Port + -R, --replication-factor= + Cassandra replication factor + --repair-all Repair tables : it repairs the table metadata of + existing tables + -u, --user= Cassandra user +``` +
+
+ ```console -Usage: java -jar scalardb-schema-loader-.jar --cosmos [-D] +Usage: java -jar scalardb-schema-loader-.jar --cosmos [-D] [--no-scaling] -f= -h= -p= [-r=] Create/Delete Cosmos DB schemas -A, --alter Alter tables : it will add new columns and create/delete @@ -99,10 +163,11 @@ Create/Delete Cosmos DB schemas existing tables and repairs stored procedure attached to each table ``` +
+
-For DynamoDB (Deprecated. Please use the command using a config file instead): ```console -Usage: java -jar scalardb-schema-loader-.jar --dynamo [-D] +Usage: java -jar scalardb-schema-loader-.jar --dynamo [-D] [--no-backup] [--no-scaling] [--endpoint-override=] -f= -p= [-r=] --region= -u= @@ -127,41 +192,11 @@ Create/Delete DynamoDB schemas existing tables -u, --user= AWS access key ID ``` +
+
-For Cassandra (Deprecated. Please use the command using a config file instead): ```console -Usage: java -jar scalardb-schema-loader-.jar --cassandra [-D] - [-c=] -f= -h= - [-n=] [-p=] [-P=] - [-R=] [-u=] -Create/Delete Cassandra schemas - -A, --alter Alter tables : it will add new columns and create/delete - secondary index for existing tables. It compares the - provided table schema to the existing schema to decide - which columns need to be added and which indexes need - to be created or deleted - -c, --compaction-strategy= - Cassandra compaction strategy, must be LCS, STCS or TWCS - -D, --delete-all Delete tables - -f, --schema-file= - Path to the schema json file - -h, --host= Cassandra host IP - -n, --network-strategy= - Cassandra network strategy, must be SimpleStrategy or - NetworkTopologyStrategy - -p, --password= - Cassandra password - -P, --port= Cassandra Port - -R, --replication-factor= - Cassandra replication factor - --repair-all Repair tables : it repairs the table metadata of - existing tables - -u, --user= Cassandra user -``` - -For a JDBC database (Deprecated. Please use the command using a config file instead): -```console -Usage: java -jar scalardb-schema-loader-.jar --jdbc [-D] +Usage: java -jar scalardb-schema-loader-.jar --jdbc [-D] -f= -j= -p= -u= Create/Delete JDBC schemas -A, --alter Alter tables : it will add new columns and create/delete @@ -179,142 +214,234 @@ Create/Delete JDBC schemas existing tables -u, --user= JDBC user ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
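+
+As a reference point, a minimal properties file for a JDBC backend might look like the following sketch (the values are hypothetical placeholders; see the sample file linked above for the full set of keys):
+
+```properties
+# Example values for a local MySQL instance.
+scalar.db.storage=jdbc
+scalar.db.contact_points=jdbc:mysql://localhost:3306/
+scalar.db.username=root
+scalar.db.password=<YOUR_PASSWORD>
+```
+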
### Create namespaces and tables -For using a config file (Sample config file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties)): +To create namespaces and tables by using a properties file, run the following command, replacing the contents in the angle brackets as described: + ```console -$ java -jar scalardb-schema-loader-.jar --config -f schema.json [--coordinator] +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] ``` - - if `--coordinator` is specified, the coordinator tables will be created. -For using CLI arguments fully for configuration (Deprecated. Please use the command using a config file instead): +If `--coordinator` is specified, a [Coordinator table](api-guide.md#specify-operations-for-the-coordinator-table) will be created. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ ```console -# For Cosmos DB for NoSQL -$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f schema.json [-r BASE_RESOURCE_UNIT] +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f [-n ] [-R ] ``` - - `` you can use a primary key or a secondary key. - - `-r BASE_RESOURCE_UNIT` is an option. You can specify the RU of each database. The maximum RU in tables in the database will be set. If you don't specify RU of tables, the database RU will be set with this option. By default, it's 400. + +- If `-P ` is not supplied, it defaults to `9042`. +- If `-u ` is not supplied, it defaults to `cassandra`. +- If `-p ` is not supplied, it defaults to `cassandra`. +- `` should be `SimpleStrategy` or `NetworkTopologyStrategy` +
+
```console -# For DynamoDB -$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f schema.json [-r BASE_RESOURCE_UNIT] +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f [-r BASE_RESOURCE_UNIT] ``` - - `` should be a string to specify an AWS region like `ap-northeast-1`. - - `-r` option is almost the same as Cosmos DB for NoSQL option. However, the unit means DynamoDB capacity unit. The read and write capacity units are set the same value. + +- `` you can use a primary key or a secondary key. +- `-r BASE_RESOURCE_UNIT` is an option. You can specify the RU of each database. The maximum RU in tables in the database will be set. If you don't specify RU of tables, the database RU will be set with this option. By default, it's 400. +
+
```console -# For Cassandra -$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f schema.json [-n ] [-R ] +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f [-r BASE_RESOURCE_UNIT] ``` - - If `-P ` is not supplied, it defaults to `9042`. - - If `-u ` is not supplied, it defaults to `cassandra`. - - If `-p ` is not supplied, it defaults to `cassandra`. - - `` should be `SimpleStrategy` or `NetworkTopologyStrategy` + +- `` should be a string to specify an AWS region like `ap-northeast-1`. +- `-r` option is almost the same as Cosmos DB for NoSQL option. However, the unit means DynamoDB capacity unit. The read and write capacity units are set the same value. +
+
```console -# For a JDBC database -$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f schema.json +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
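+
+For example, a typical run of the properties-file command above might look like the following sketch, which assumes your configuration is in `database.properties`, your schema is in `schema.json`, and `<VERSION>` is the Schema Loader version you downloaded:
+
+```console
+$ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties -f schema.json --coordinator
+```
+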
### Alter tables

-This command will add new columns and create/delete secondary index for existing tables. It compares
-the provided table schema to the existing schema to decide which columns need to be added and which
-indexes need to be created or deleted.
+You can use a command to add new columns to and create or delete a secondary index for existing tables. This command compares the provided table schema to the existing schema to decide which columns need to be added and which indexes need to be created or deleted.

-For using config file (Sample config file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties)):
+To add new columns to and create or delete a secondary index for existing tables, run the following command, replacing the contents in the angle brackets as described:

```console
-$ java -jar scalardb-schema-loader-.jar --config  -f schema.json --alter
+$ java -jar scalardb-schema-loader-.jar --config  -f  --alter
```

-For using CLI arguments fully for configuration (Deprecated. Please use the command using a config
-file instead):
+{% capture notice--info %}
+**Note**
+
+The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead.
+
+
+ + + + +
+ +
```console -# For Cosmos DB for NoSQL -$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f schema.json --alter +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f --alter ``` +
+
```console -# For DynamoDB -$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f schema.json --alter +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f --alter ``` +
+
```console -# For Cassandra -$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f schema.json --alter +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f --alter ``` +
+
```console -# For a JDBC database -$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f schema.json --alter +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f --alter ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
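+
+For example, assuming the new columns and secondary indexes are defined in a modified copy of your schema file named `altered_schema.json` (a hypothetical name), the run might look like this:
+
+```console
+$ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties -f altered_schema.json --alter
+```
+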
### Delete tables -For using config file (Sample config file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties)): +You can delete tables by using the properties file. To delete tables, run the following command, replacing the contents in the angle brackets as described: + ```console -$ java -jar scalardb-schema-loader-.jar --config -f schema.json [--coordinator] -D +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] -D ``` - - if `--coordinator` is specified, the coordinator tables will be deleted. - -For using CLI arguments fully for configuration (Deprecated. Please use the command using a config file instead): + +If `--coordinator` is specified, the Coordinator table will be deleted as well. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ ```console -# For Cosmos DB for NoSQL -$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f schema.json -D +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f -D ``` +
+
```console -# For DynamoDB -$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f schema.json -D +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f -D ``` +
+
```console -# For Cassandra -$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f schema.json -D +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region -f -D ``` +
+
```console -# For a JDBC database -$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f schema.json -D +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f -D ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
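+
+For example, to delete the tables defined in `schema.json` together with the Coordinator table (hypothetical file names), the run might look like this:
+
+```console
+$ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties -f schema.json --coordinator -D
+```
+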
### Repair tables -This command will repair the table metadata of existing tables. When using Cosmos DB for NoSQL, it additionally repairs stored procedure attached to each table. +You can repair the table metadata of existing tables by using the properties file. To repair table metadata of existing tables, run the following command, replacing the contents in the angle brackets as described: -For using config file (Sample config file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties)): ```console -$ java -jar scalardb-schema-loader-.jar --config -f schema.json [--coordinator] --repair-all +$ java -jar scalardb-schema-loader-.jar --config -f [--coordinator] --repair-all ``` -- if `--coordinator` is specified, the coordinator tables will be repaired as well. -For using CLI arguments fully for configuration (Deprecated. Please use the command using a config file instead): +If `--coordinator` is specified, the Coordinator table will be repaired as well. In addition, if you're using Cosmos DB for NoSQL, running this command will also repair stored procedures attached to each table. + +{% capture notice--info %} +**Note** + +The following database-specific CLI arguments have been deprecated. Please use the CLI arguments for configuring the properties file instead. + +
+
+ + + + +
+ +
+ ```console -# For Cosmos DB for NoSQL -$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f schema.json --repair-all +$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f --repair-all ``` +
+
```console -# For DynamoDB -$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region [--no-backup] -f schema.json --repair-all +$ java -jar scalardb-schema-loader-.jar --cosmos -h -p -f --repair-all ``` +
+
```console -# For Cassandra -$ java -jar scalardb-schema-loader-.jar --cassandra -h [-P ] [-u ] [-p ] -f schema.json --repair-all +$ java -jar scalardb-schema-loader-.jar --dynamo -u -p --region [--no-backup] -f --repair-all ``` +
+
```console -# For a JDBC database -$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f schema.json --repair-all +$ java -jar scalardb-schema-loader-.jar --jdbc -j -u -p -f --repair-all ``` +
+
+{% endcapture %} + +
{{ notice--info | markdownify }}
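+
+For example, to repair the table metadata for the tables defined in `schema.json` as well as the Coordinator table (hypothetical file names), the run might look like this:
+
+```console
+$ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties -f schema.json --coordinator --repair-all
+```
+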
### Sample schema file -The sample schema is as follows (Sample schema file can be found [here](https://github.com/scalar-labs/scalardb/blob/master/schema-loader/sample/schema_sample.json)): +The following is a sample schema. For a sample schema file, see [`schema_sample.json`](https://github.com/scalar-labs/scalardb/blob/master/schema-loader/sample/schema_sample.json). ```json { @@ -379,14 +506,17 @@ The sample schema is as follows (Sample schema file can be found [here](https:// ``` The schema has table definitions that include `columns`, `partition-key`, `clustering-key`, `secondary-index`, and `transaction` fields. -The `columns` field defines columns of the table and their data types. -The `partition-key` field defines which columns the partition key is composed of, and `clustering-key` defines which columns the clustering key is composed of. -The `secondary-index` field defines which columns are indexed. -The `transaction` field indicates whether the table is for transactions or not. -If you set the `transaction` field to `true` or don't specify the `transaction` field, this tool creates a table with transaction metadata if needed. -If not, it creates a table without any transaction metadata (that is, for a table with [Storage API](storage-abstraction.md)). - -You can also specify database/storage-specific options in the table definition as follows: + +- The `columns` field defines columns of the table and their data types. +- The `partition-key` field defines which columns the partition key is composed of. +- The `clustering-key` field defines which columns the clustering key is composed of. +- The `secondary-index` field defines which columns are indexed. +- The `transaction` field indicates whether the table is for transactions or not. + - If you set the `transaction` field to `true` or don't specify the `transaction` field, this tool creates a table with transaction metadata if needed. + - If you set the `transaction` field to `false`, this tool creates a table without any transaction metadata (that is, for a table with [Storage API](storage-abstraction.md)). + +You can also specify database or storage-specific options in the table definition as follows: + ```json { "sample_db.sample_table3": { @@ -404,30 +534,68 @@ You can also specify database/storage-specific options in the table definition a } ``` -The database/storage-specific options you can specify are as follows: +The database or storage-specific options you can specify are as follows: -For Cassandra: -- `compaction-strategy`, a compaction strategy. It should be `STCS` (SizeTieredCompaction), `LCS` (LeveledCompactionStrategy) or `TWCS` (TimeWindowCompactionStrategy). +
+
+ + + + +
-For DynamoDB and Cosmos DB for NoSQL: -- `ru`, a request unit. Please see [RU](#ru) for the details. +
-## Scaling Performance +The `compaction-strategy` option is the compaction strategy used. This option should be `STCS` (SizeTieredCompaction), `LCS` (LeveledCompactionStrategy), or `TWCS` (TimeWindowCompactionStrategy). +
+
-### RU +The `ru` option stands for Request Units. For details, see [RUs](#rus). +
+
-You can scale the throughput of Cosmos DB for NoSQL and DynamoDB by specifying `--ru` option (which applies to all the tables) or `ru` parameter for each table. The default values are `400` for Cosmos DB for NoSQL and `10` for DynamoDB respectively, which are set without `--ru` option. +The `ru` option stands for Request Units. For details, see [RUs](#rus). +
+
-Note that the schema loader abstracts [Request Unit](https://docs.microsoft.com/azure/cosmos-db/request-units) of Cosmos DB for NoSQL and [Capacity Unit](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) of DynamoDB with `RU`. -So, please set an appropriate value depending on the database implementations. Please also note that the schema loader sets the same value to both Read Capacity Unit and Write Capacity Unit for DynamoDB. +No options are available for JDBC databases. +
+
+
+## Scale for performance when using Cosmos DB for NoSQL or DynamoDB
+
+When using Cosmos DB for NoSQL or DynamoDB, you can scale by using Request Units (RUs) or auto-scaling.
+
+### RUs
+
+You can scale the throughput of Cosmos DB for NoSQL and DynamoDB by specifying the `--ru` option, which applies to all tables, or by specifying the `ru` parameter for each table.
+
+If the `--ru` option is not set, the default values will be `400` for Cosmos DB for NoSQL and `10` for DynamoDB.
+
+{% capture notice--info %}
+**Note**
+
+- Schema Loader abstracts [Request Units](https://docs.microsoft.com/azure/cosmos-db/request-units) for Cosmos DB for NoSQL and [Capacity Units](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) for DynamoDB with `RU`. Therefore, be sure to set an appropriate value depending on the database implementation.
+- Be aware that Schema Loader sets the same value to both read capacity unit and write capacity unit for DynamoDB.
+{% endcapture %}
+
{{ notice--info | markdownify }}
### Auto-scaling
-By default, the schema loader enables auto-scaling of RU for all tables: RU is scaled in or out between 10% and 100% of a specified RU depending on a workload. For example, if you specify `-r 10000`, RU of each table is scaled in or out between 1000 and 10000. Note that auto-scaling of Cosmos DB for NoSQL is enabled only when you set more than or equal to 4000 RU.
+By default, Schema Loader enables auto-scaling of RUs for all tables: RUs scale between 10 percent and 100 percent of a specified RU depending on the workload. For example, if you specify `-r 10000`, the RUs of each table auto-scale between `1000` and `10000`.

-## Data type mapping between ScalarDB and the other databases
+{% capture notice--info %}
+**Note**

-Here are the supported data types in ScalarDB and their mapping to the data types of other databases.
+Auto-scaling for Cosmos DB for NoSQL is enabled only when this option is set to `4000` or more.
+{% endcapture %}
+
{{ notice--info | markdownify }}
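+
+For example, the following sketch (which assumes your configuration is in `database.properties`, your schema is in `schema.json`, and `<VERSION>` is the Schema Loader version you downloaded) creates tables with a base of `10000` RUs, so each table auto-scales between `1000` and `10000` RUs:
+
+```console
+$ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties -f schema.json --ru=10000
+```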
+
+## Data-type mapping between ScalarDB and other databases
+
+The following table shows the supported data types in ScalarDB and their mapping to the data types of other databases.

| ScalarDB  | Cassandra | Cosmos DB for NoSQL | DynamoDB | MySQL    | PostgreSQL       | Oracle         | SQL Server      | SQLite  |
|-----------|-----------|---------------------|----------|----------|------------------|----------------|-----------------|---------|
@@ -439,48 +607,47 @@ Here are the supported data types in ScalarDB and their mapping to the data type
| TEXT      | text      | string (JSON)       | S        | longtext | text             | varchar2(4000) | varchar(8000)   | text    |
| BLOB      | blob      | string (JSON)       | B        | longblob | bytea            | RAW(2000)      | varbinary(8000) | blob    |

-However, the following types in JDBC databases are converted differently when they are used as a primary key or a secondary index key due to the limitations of RDB data types.
+However, the following data types in JDBC databases are converted differently when they are used as a primary key or a secondary index key. This is due to the limitations of RDB data types.

| ScalarDB | MySQL         | PostgreSQL        | Oracle       |
|----------|---------------|-------------------|--------------|
| TEXT     | VARCHAR(64)   | VARCHAR(10485760) | VARCHAR2(64) |
| BLOB     | VARBINARY(64) |                   | RAW(64)      |

-The value range of `BIGINT` in ScalarDB is from -2^53 to 2^53 regardless of the underlying database.
+The value range of `BIGINT` in ScalarDB is from -2^53 to 2^53, regardless of the underlying database.

-If this data type mapping doesn't match your application, please alter the tables to change the data types after creating them with this tool.
+If this data-type mapping doesn't match your application, please alter the tables to change the data types after creating them by using this tool.

## Internal metadata for Consensus Commit

-The Consensus Commit transaction manager manages metadata (e.g., transaction ID, record version, transaction status) stored along with the actual records to handle transactions properly.
-Thus, along with any required columns by the application, additional columns for the metadata need to be defined in the schema.
-Additionaly, this tool creates a table with the metadata when you use the Consensus Commit transaction manager.
+The Consensus Commit transaction manager manages metadata (for example, transaction ID, record version, and transaction status) stored along with the actual records to handle transactions properly.
+
+Thus, along with any columns that the application requires, additional columns for the metadata need to be defined in the schema. Additionally, this tool creates a table with the metadata if you use the Consensus Commit transaction manager.
+
+## Use Schema Loader in your application
+
+You can check the version of Schema Loader from the [Maven Central Repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb-schema-loader). For example, in Gradle, you can add the following dependency to your `build.gradle` file, replacing `<VERSION>` with the version of Schema Loader that you want to use:

-## Using Schema Loader in your program
-You can check the version of `schema-loader` from [maven central repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb-schema-loader).
-For example in Gradle, you can add the following dependency to your build.gradle. Please replace the `` with the version you want to use.
```gradle
dependencies {
-    implementation 'com.scalar-labs:scalardb-schema-loader:'
+    implementation 'com.scalar-labs:scalardb-schema-loader:<VERSION>'
}
```

-### Create, alter, repair and delete
+### Create, alter, repair, or delete tables

-You can create, alter, delete and repair tables that are defined in the schema using SchemaLoader by
-simply passing ScalarDB configuration file, schema, and additional options if needed as shown
-below.
+You can create, alter, delete, or repair tables that are defined in the schema by using Schema Loader. To do this, you can pass a ScalarDB properties file, schema, and additional options, if needed, as shown below:

```java
public class SchemaLoaderSample {
  public static int main(String... args) throws SchemaLoaderException {
    Path configFilePath = Paths.get("database.properties");
-    // "sample_schema.json" and "altered_sample_schema.json" can be found in the "/sample" directory
+    // "sample_schema.json" and "altered_sample_schema.json" can be found in the "/sample" directory.
    Path schemaFilePath = Paths.get("sample_schema.json");
    Path alteredSchemaFilePath = Paths.get("altered_sample_schema.json");

-    boolean createCoordinatorTables = true; // whether to create the coordinator tables or not
-    boolean deleteCoordinatorTables = true; // whether to delete the coordinator tables or not
-    boolean repairCoordinatorTables = true; // whether to repair the coordinator tables or not
+    boolean createCoordinatorTables = true; // whether to create the Coordinator table or not
+    boolean deleteCoordinatorTables = true; // whether to delete the Coordinator table or not
+    boolean repairCoordinatorTables = true; // whether to repair the Coordinator table or not

    Map<String, String> tableCreationOptions = new HashMap<>();

@@ -499,16 +666,16 @@ public class SchemaLoaderSample {
    Map<String, String> tableReparationOptions = new HashMap<>();
    indexCreationOptions.put(DynamoAdmin.NO_BACKUP, "true");

-    // Create tables
+    // Create tables.
    SchemaLoader.load(configFilePath, schemaFilePath, tableCreationOptions, createCoordinatorTables);

-    // Alter tables
+    // Alter tables.
    SchemaLoader.alterTables(configFilePath, alteredSchemaFilePath, indexCreationOptions);

-    // Repair tables
+    // Repair tables.
    SchemaLoader.repairTables(configFilePath, schemaFilePath, tableReparationOptions, repairCoordinatorTables);

-    // Delete tables
+    // Delete tables.
    SchemaLoader.unload(configFilePath, schemaFilePath, deleteCoordinatorTables);

    return 0;
@@ -516,33 +683,34 @@ public class SchemaLoaderSample {
  }
}
```

-You can also create, delete or repair a schema by passing a serialized schema JSON string (the raw text of a schema file).
+You can also create, delete, or repair a schema by passing a serialized-schema JSON string (the raw text of a schema file) as shown below:
+
```java
-// Create tables
+// Create tables.
SchemaLoader.load(configFilePath, serializedSchemaJson, tableCreationOptions, createCoordinatorTables);

-// Alter tables
+// Alter tables.
SchemaLoader.alterTables(configFilePath, serializedAlteredSchemaFilePath, indexCreationOptions);

-// Repair tables
+// Repair tables.
SchemaLoader.repairTables(configFilePath, serializedSchemaJson, tableReparationOptions, repairCoordinatorTables);

-// Delete tables
+// Delete tables.
SchemaLoader.unload(configFilePath, serializedSchemaJson, deleteCoordinatorTables);
```

-For ScalarDB configuration, a `Properties` object can be used as well.
+When configuring ScalarDB, you can use a `Properties` object as well, as shown below:

```java
-// Create tables
+// Create tables.
SchemaLoader.load(properties, serializedSchemaJson, tableCreationOptions, createCoordinatorTables); -// Alter tables +// Alter tables. SchemaLoader.alterTables(properties, serializedAlteredSchemaFilePath, indexCreationOptions); -// Repair tables +// Repair tables. SchemaLoader.repairTables(properties, serializedSchemaJson, tableReparationOptions, repairCoordinatorTables); -// Delete tables +// Delete tables. SchemaLoader.unload(properties, serializedSchemaJson, deleteCoordinatorTables); ``` From 03faba4e0fbb9ad83a7ce80a14b77133d20375bb Mon Sep 17 00:00:00 2001 From: josh-wong Date: Tue, 14 Nov 2023 11:12:12 +0900 Subject: [PATCH 2/2] Update side navigation Update titles in the side navigation for consistency (title-style capitalization and to match actual doc titles), and add missing docs. --- _data/navigation.yml | 138 ++++++++++++++++++++++++------------------- 1 file changed, 76 insertions(+), 62 deletions(-) diff --git a/_data/navigation.yml b/_data/navigation.yml index 7785f866..8e828030 100644 --- a/_data/navigation.yml +++ b/_data/navigation.yml @@ -97,9 +97,9 @@ versions: # Develop docs - title: "Develop" children: - - title: "Add ScalarDB to your build" + - title: "Add ScalarDB to Your Build" url: /docs/latest/add-scalardb-to-your-build/ - - title: "Add ScalarDB SQL to your build" + - title: "Add ScalarDB SQL to Your Build" url: /docs/latest/scalardb-sql/add-scalardb-sql-to-your-build/ - title: "Developer Guides for ScalarDB" url: /docs/latest/guides/ @@ -107,13 +107,17 @@ versions: url: /docs/latest/development-configurations/ - title: "ScalarDB Schema Loader" url: /docs/latest/schema-loader/ + - title: "ScalarDB FDW" + url: /docs/latest/scalardb-analytics-postgresql/scalardb-fdw/ + - title: "Schema Importer" + url: /docs/latest/scalardb-analytics-postgresql/schema-importer/ # - title: "Export Function for ScalarDB Data Loader" # May be added in the near future. # url: /docs/latest/scalardb-data-loader/getting-started-export/ # - title: "Import Function for ScalarDB Data Loader" # May be added in the near future. 
# url: /docs/latest/scalardb-data-loader/getting-started-import/ - title: "How to Run ScalarDB GraphQL Server" url: /docs/latest/scalardb-graphql/how-to-run-server/ - - title: "How to Run Two-Phase Commit Transaction" + - title: "How to Run Two-Phase Commit Transaction by Using ScalarDB GraphQL" url: /docs/latest/scalardb-graphql/how-to-run-two-phase-commit-transaction/ - title: "ScalarDB SQL Command Line Interface" url: /docs/latest/scalardb-sql/command-line-interface/ @@ -126,13 +130,13 @@ versions: url: /docs/latest/helm-charts/getting-started-scalar-helm-charts/ - title: "Set Up ScalarDB Cluster on Kubernetes by Using a Helm Chart" url: /docs/latest/scalardb-cluster/setup-scalardb-cluster-on-kubernetes-by-using-helm-chart/ - - title: "Configure a custom values file for Scalar Helm Charts" + - title: "Configure a Custom Values File for Scalar Helm Charts" url: /docs/latest/helm-charts/configure-custom-values-file/ - - title: "Deploy Scalar products using Scalar Helm Charts" + - title: "Deploy Scalar Products Using Scalar Helm Charts" url: /docs/latest/helm-charts/how-to-deploy-scalar-products/ - - title: "Mount any files or volumes on Scalar product pods" + - title: "Mount Any Files or Volumes on Scalar Product Pods" url: /docs/latest/helm-charts/mount-files-or-volumes-on-scalar-pods/ - - title: "How to use Secret resources to pass the credentials as the environment variables into the properties file" + - title: "How to Use Secret Resources to Pass Credentials as Environment Variables into the Properties File" url: /docs/latest/helm-charts/use-secret-for-credentials/ - title: "How to Install ScalarDB Analytics with PostgreSQL in Your Local Environment by Using Docker" url: /docs/latest/scalardb-analytics-postgresql/installation/ @@ -143,7 +147,7 @@ versions: # Manage docs - title: "Manage" children: - - title: "How to Backup and Restore Databases Used Through ScalarDB" + - title: "How to Back Up and Restore Databases Used Through ScalarDB" url: /docs/latest/backup-restore/ - title: "Managing ScalarDB on Managed Kubernetes Services" url: /docs/latest/scalar-kubernetes/manage-kubernetes/ @@ -159,9 +163,9 @@ versions: url: /docs/latest/design/ - title: "ScalarDB Supported Databases" url: /docs/latest/scalardb-supported-databases/ - - title: "Requirements in the Underlying Databases of ScalarDB" + - title: "Requirements and Recommendations for the Underlying Databases of ScalarDB" url: /docs/latest/requirements/ - - title: "Storage Abstraction" + - title: "Storage Abstraction and API Guide" url: /docs/latest/storage-abstraction/ - title: "ScalarDB Benchmarks" url: /docs/latest/scalardb-benchmarks/README/ @@ -235,9 +239,9 @@ versions: # Develop docs - title: "Develop" children: - - title: "Add ScalarDB to your build" + - title: "Add ScalarDB to Your Build" url: /docs/3.9/add-scalardb-to-your-build/ - - title: "Add ScalarDB SQL to your build" + - title: "Add ScalarDB SQL to Your Build" url: /docs/3.9/scalardb-sql/add-scalardb-sql-to-your-build/ - title: "Developer Guides for ScalarDB" url: /docs/3.9/guides/ @@ -245,13 +249,17 @@ versions: url: /docs/3.9/development-configurations/ - title: "ScalarDB Schema Loader" url: /docs/3.9/schema-loader/ + - title: "ScalarDB FDW" + url: /docs/3.9/scalardb-analytics-postgresql/scalardb-fdw/ + - title: "Schema Importer" + url: /docs/3.9/scalardb-analytics-postgresql/schema-importer/ # - title: "Export Function for ScalarDB Data Loader" # May be added in the near future. 
 #        url: /docs/3.9/scalardb-data-loader/getting-started-export/
 #      - title: "Import Function for ScalarDB Data Loader" # May be added in the near future.
 #        url: /docs/3.9/scalardb-data-loader/getting-started-import/
       - title: "How to Run ScalarDB GraphQL Server"
         url: /docs/3.9/scalardb-graphql/how-to-run-server/
-      - title: "How to Run Two-Phase Commit Transaction"
+      - title: "How to Run Two-Phase Commit Transaction by Using ScalarDB GraphQL"
         url: /docs/3.9/scalardb-graphql/how-to-run-two-phase-commit-transaction/
       - title: "ScalarDB SQL Command Line Interface"
         url: /docs/3.9/scalardb-sql/command-line-interface/
@@ -264,13 +272,13 @@ versions:
         url: /docs/3.9/helm-charts/getting-started-scalar-helm-charts/
       - title: "Set Up ScalarDB Cluster on Kubernetes by Using a Helm Chart"
         url: /docs/3.9/scalardb-cluster/setup-scalardb-cluster-on-kubernetes-by-using-helm-chart/
-      - title: "Configure a custom values file for Scalar Helm Charts"
+      - title: "Configure a Custom Values File for Scalar Helm Charts"
         url: /docs/3.9/helm-charts/configure-custom-values-file/
-      - title: "Deploy Scalar products using Scalar Helm Charts"
+      - title: "Deploy Scalar Products Using Scalar Helm Charts"
         url: /docs/3.9/helm-charts/how-to-deploy-scalar-products/
-      - title: "Mount any files or volumes on Scalar product pods"
+      - title: "Mount Any Files or Volumes on Scalar Product Pods"
         url: /docs/3.9/helm-charts/mount-files-or-volumes-on-scalar-pods/
-      - title: "How to use Secret resources to pass the credentials as the environment variables into the properties file"
+      - title: "How to Use Secret Resources to Pass Credentials as Environment Variables into the Properties File"
         url: /docs/3.9/helm-charts/use-secret-for-credentials/
       - title: "How to Install ScalarDB Analytics with PostgreSQL in Your Local Environment by Using Docker"
         url: /docs/3.9/scalardb-analytics-postgresql/installation/
@@ -281,7 +289,7 @@ versions:
     # Manage docs
     - title: "Manage"
       children:
-      - title: "How to Backup and Restore Databases Used Through ScalarDB"
+      - title: "How to Back Up and Restore Databases Used Through ScalarDB"
         url: /docs/3.9/backup-restore/
       - title: "Managing ScalarDB on Managed Kubernetes Services"
         url: /docs/3.9/scalar-kubernetes/manage-kubernetes/
@@ -297,9 +305,9 @@ versions:
         url: /docs/3.9/design/
       - title: "ScalarDB Supported Databases"
         url: /docs/3.9/scalardb-supported-databases/
-      - title: "Requirements in the Underlying Databases of ScalarDB"
+      - title: "Requirements and Recommendations for the Underlying Databases of ScalarDB"
         url: /docs/3.9/requirements/
-      - title: "Storage Abstraction"
+      - title: "Storage Abstraction and API Guide"
         url: /docs/3.9/storage-abstraction/
       - title: "ScalarDB Benchmarks"
         url: /docs/3.9/scalardb-benchmarks/README/
@@ -377,7 +385,7 @@ versions:
     # Develop docs
     - title: "Develop"
       children:
-      - title: "Add ScalarDB to your build"
+      - title: "Add ScalarDB to Your Build"
         url: /docs/3.8/add-scalardb-to-your-build/
       - title: "Developer Guides for ScalarDB"
         url: /docs/3.8/guides/
       - title: "Configuration Guides for ScalarDB"
         url: /docs/3.8/development-configurations/
       - title: "Export Function for ScalarDB Data Loader"
         url: /docs/3.8/scalardb-data-loader/getting-started-export/
       - title: "Import Function for ScalarDB Data Loader"
         url: /docs/3.8/scalardb-data-loader/getting-started-import/
-      - title: "How to Run Two-Phase Commit Transaction"
+      - title: "How to Run Two-Phase Commit Transaction by Using ScalarDB GraphQL"
         url: /docs/3.8/scalardb-graphql/how-to-run-two-phase-commit-transaction/
       - title: "How to Run ScalarDB GraphQL Server"
         url: /docs/3.8/scalardb-graphql/how-to-run-server/
@@ -408,13 +416,13 @@ versions:
         url: /docs/3.8/scalardb-server/
       - title: "Getting Started with Helm Charts (ScalarDB Server)"
         url: /docs/3.8/helm-charts/getting-started-scalardb/
-      - title: "Configure a custom values file for Scalar Helm Charts"
+      - title: "Configure a Custom Values File for Scalar Helm Charts"
         url: /docs/3.8/helm-charts/configure-custom-values-file/
-      - title: "Deploy Scalar products using Scalar Helm Charts"
+      - title: "Deploy Scalar Products Using Scalar Helm Charts"
         url: /docs/3.8/helm-charts/how-to-deploy-scalar-products/
-      - title: "Mount any files or volumes on Scalar product pods"
+      - title: "Mount Any Files or Volumes on Scalar Product Pods"
         url: /docs/3.8/helm-charts/mount-files-or-volumes-on-scalar-pods/
-      - title: "How to use Secret resources to pass the credentials as the environment variables into the properties file"
+      - title: "How to Use Secret Resources to Pass Credentials as Environment Variables into the Properties File"
         url: /docs/3.8/helm-charts/use-secret-for-credentials/
       - title: "ScalarDB GraphQL Deployment Guide on AWS"
         url: /docs/3.8/scalardb-graphql/aws-deployment-guide/
@@ -423,7 +431,7 @@ versions:
     # Manage docs
     - title: "Manage"
       children:
-      - title: "How to Backup and Restore Databases Used Through ScalarDB"
+      - title: "How to Back Up and Restore Databases Used Through ScalarDB"
         url: /docs/3.8/backup-restore/
       - title: "Managing ScalarDB on Managed Kubernetes Services"
         url: /docs/3.8/scalar-kubernetes/manage-kubernetes/
@@ -439,9 +447,9 @@ versions:
         url: /docs/3.8/design/
       - title: "ScalarDB Supported Databases"
         url: /docs/3.8/scalardb-supported-databases/
-      - title: "Requirements in the Underlying Databases of ScalarDB"
+      - title: "Requirements and Recommendations for the Underlying Databases of ScalarDB"
         url: /docs/3.8/requirements/
-      - title: "Storage Abstraction"
+      - title: "Storage Abstraction and API Guide"
         url: /docs/3.8/storage-abstraction/
       - title: "ScalarDB Benchmarks"
         url: /docs/3.8/scalardb-benchmarks/README/
@@ -519,17 +527,19 @@ versions:
     # Develop docs
     - title: "Develop"
       children:
-      - title: "Add ScalarDB to your build"
+      - title: "Add ScalarDB to Your Build"
         url: /docs/3.7/add-scalardb-to-your-build/
       - title: "Developer Guides for ScalarDB"
         url: /docs/3.7/guides/
       - title: "Configuration Guides for ScalarDB"
         url: /docs/3.7/development-configurations/
+      - title: "ScalarDB Schema Loader"
+        url: /docs/3.7/schema-loader/
       - title: "Export Function for ScalarDB Data Loader"
         url: /docs/3.7/scalardb-data-loader/getting-started-export/
       - title: "Import Function for ScalarDB Data Loader"
         url: /docs/3.7/scalardb-data-loader/getting-started-import/
-      - title: "How to Run Two-Phase Commit Transaction"
+      - title: "How to Run Two-Phase Commit Transaction by Using ScalarDB GraphQL"
         url: /docs/3.7/scalardb-graphql/how-to-run-two-phase-commit-transaction/
       - title: "How to Run ScalarDB GraphQL Server"
         url: /docs/3.7/scalardb-graphql/how-to-run-server/
@@ -548,13 +558,13 @@ versions:
         url: /docs/3.7/scalardb-server/
       - title: "Getting Started with Helm Charts (ScalarDB Server)"
         url: /docs/3.7/helm-charts/getting-started-scalardb/
-      - title: "Configure a custom values file for Scalar Helm Charts"
+      - title: "Configure a Custom Values File for Scalar Helm Charts"
         url: /docs/3.7/helm-charts/configure-custom-values-file/
-      - title: "Deploy Scalar products using Scalar Helm Charts"
+      - title: "Deploy Scalar Products Using Scalar Helm Charts"
         url: /docs/3.7/helm-charts/how-to-deploy-scalar-products/
-      - title: "Mount any files or volumes on Scalar product pods"
+      - title: "Mount Any Files or Volumes on Scalar Product Pods"
         url: /docs/3.7/helm-charts/mount-files-or-volumes-on-scalar-pods/
-      - title: "How to use Secret resources to pass the credentials as the environment variables into the properties file"
+      - title: "How to Use Secret Resources to Pass Credentials as Environment Variables into the Properties File"
         url: /docs/3.7/helm-charts/use-secret-for-credentials/
       - title: "ScalarDB GraphQL Deployment Guide on AWS"
         url: /docs/3.7/scalardb-graphql/aws-deployment-guide/
@@ -563,7 +573,7 @@ versions:
     # Manage docs
     - title: "Manage"
       children:
-      - title: "How to Backup and Restore Databases Used Through ScalarDB"
+      - title: "How to Back Up and Restore Databases Used Through ScalarDB"
         url: /docs/3.7/backup-restore/
       - title: "Managing ScalarDB on Managed Kubernetes Services"
         url: /docs/3.7/scalar-kubernetes/manage-kubernetes/
@@ -579,9 +589,9 @@ versions:
         url: /docs/3.7/design/
       - title: "ScalarDB Supported Databases"
         url: /docs/3.7/scalardb-supported-databases/
-      - title: "Requirements in the Underlying Databases of ScalarDB"
+      - title: "Requirements and Recommendations for the Underlying Databases of ScalarDB"
         url: /docs/3.7/requirements/
-      - title: "Storage Abstraction"
+      - title: "Storage Abstraction and API Guide"
         url: /docs/3.7/storage-abstraction/
       - title: "ScalarDB Benchmarks"
         url: /docs/3.7/scalardb-benchmarks/README/
@@ -659,17 +669,19 @@ versions:
     # Develop docs
     - title: "Develop"
       children:
-      - title: "Add ScalarDB to your build"
+      - title: "Add ScalarDB to Your Build"
         url: /docs/3.6/add-scalardb-to-your-build/
       - title: "Developer Guides for ScalarDB"
         url: /docs/3.6/guides/
       - title: "Configuration Guides for ScalarDB"
         url: /docs/3.6/development-configurations/
+      - title: "ScalarDB Schema Loader"
+        url: /docs/3.6/schema-loader/
       - title: "Export Function for ScalarDB Data Loader"
         url: /docs/3.6/scalardb-data-loader/getting-started-export/
       - title: "Import Function for ScalarDB Data Loader"
         url: /docs/3.6/scalardb-data-loader/getting-started-import/
-      - title: "How to Run Two-Phase Commit Transaction"
+      - title: "How to Run Two-Phase Commit Transaction by Using ScalarDB GraphQL"
         url: /docs/3.6/scalardb-graphql/how-to-run-two-phase-commit-transaction/
       - title: "How to Run ScalarDB GraphQL Server"
         url: /docs/3.6/scalardb-graphql/how-to-run-server/
@@ -688,13 +700,13 @@ versions:
         url: /docs/3.6/scalardb-server/
       - title: "Getting Started with Helm Charts (ScalarDB Server)"
         url: /docs/3.6/helm-charts/getting-started-scalardb/
-      - title: "Configure a custom values file for Scalar Helm Charts"
+      - title: "Configure a Custom Values File for Scalar Helm Charts"
         url: /docs/3.6/helm-charts/configure-custom-values-file/
-      - title: "Deploy Scalar products using Scalar Helm Charts"
+      - title: "Deploy Scalar Products Using Scalar Helm Charts"
         url: /docs/3.6/helm-charts/how-to-deploy-scalar-products/
-      - title: "Mount any files or volumes on Scalar product pods"
+      - title: "Mount Any Files or Volumes on Scalar Product Pods"
         url: /docs/3.6/helm-charts/mount-files-or-volumes-on-scalar-pods/
-      - title: "How to use Secret resources to pass the credentials as the environment variables into the properties file"
+      - title: "How to Use Secret Resources to Pass Credentials as Environment Variables into the Properties File"
         url: /docs/3.6/helm-charts/use-secret-for-credentials/
       - title: "ScalarDB GraphQL Deployment Guide on AWS"
         url: /docs/3.6/scalardb-graphql/aws-deployment-guide/
@@ -703,7 +715,7 @@ versions:
     # Manage docs
     - title: "Manage"
       children:
-      - title: "How to Backup and Restore Databases Used Through ScalarDB"
+      - title: "How to Back Up and Restore Databases Used Through ScalarDB"
         url: /docs/3.6/backup-restore/
       - title: "Managing ScalarDB on Managed Kubernetes Services"
         url: /docs/3.6/scalar-kubernetes/manage-kubernetes/
@@ -719,9 +731,9 @@ versions:
         url: /docs/3.6/design/
       - title: "ScalarDB Supported Databases"
         url: /docs/3.6/scalardb-supported-databases/
-      - title: "Requirements in the Underlying Databases of ScalarDB"
+      - title: "Requirements and Recommendations for the Underlying Databases of ScalarDB"
         url: /docs/3.6/requirements/
-      - title: "Storage Abstraction"
+      - title: "Storage Abstraction and API Guide"
         url: /docs/3.6/storage-abstraction/
       - title: "ScalarDB Benchmarks"
         url: /docs/3.6/scalardb-benchmarks/README/
@@ -795,10 +807,12 @@ versions:
     # Develop docs
     - title: "Develop"
       children:
-      - title: "Add ScalarDB to your build"
+      - title: "Add ScalarDB to Your Build"
         url: /docs/3.5/add-scalardb-to-your-build/
       - title: "Configuration Guides for ScalarDB"
         url: /docs/3.5/development-configurations/
+      - title: "ScalarDB Schema Loader"
+        url: /docs/3.5/schema-loader/
       - title: "Export Function for ScalarDB Data Loader"
         url: /docs/3.5/scalardb-data-loader/getting-started-export/
       - title: "Import Function for ScalarDB Data Loader"
@@ -807,7 +821,7 @@ versions:
         url: /docs/3.5/multi-storage-transactions/
       - title: "Two-Phase Commit Transactions"
         url: /docs/3.5/two-phase-commit-transactions/
-      - title: "How to Run Two-Phase Commit Transaction"
+      - title: "How to Run Two-Phase Commit Transaction by Using ScalarDB GraphQL"
         url: /docs/3.5/scalardb-graphql/how-to-run-two-phase-commit-transaction/
       - title: "How to Run ScalarDB GraphQL Server"
         url: /docs/3.5/scalardb-graphql/how-to-run-server/
@@ -824,20 +838,20 @@ versions:
         url: /docs/3.5/scalardb-server/
       - title: "Getting Started with Helm Charts (ScalarDB Server)"
         url: /docs/3.5/helm-charts/getting-started-scalardb/
-      - title: "Configure a custom values file for Scalar Helm Charts"
+      - title: "Configure a Custom Values File for Scalar Helm Charts"
         url: /docs/3.5/helm-charts/configure-custom-values-file/
-      - title: "Deploy Scalar products using Scalar Helm Charts"
+      - title: "Deploy Scalar Products Using Scalar Helm Charts"
         url: /docs/3.5/helm-charts/how-to-deploy-scalar-products/
-      - title: "Mount any files or volumes on Scalar product pods"
+      - title: "Mount Any Files or Volumes on Scalar Product Pods"
         url: /docs/3.5/helm-charts/mount-files-or-volumes-on-scalar-pods/
-      - title: "How to use Secret resources to pass the credentials as the environment variables into the properties file"
+      - title: "How to Use Secret Resources to Pass Credentials as Environment Variables into the Properties File"
         url: /docs/3.5/helm-charts/use-secret-for-credentials/
       - title: "ScalarDB GraphQL Deployment Guide on AWS"
         url: /docs/3.5/scalardb-graphql/aws-deployment-guide/
     # Manage docs
     - title: "Manage"
       children:
-      - title: "How to Backup and Restore Databases Used Through ScalarDB"
+      - title: "How to Back Up and Restore Databases Used Through ScalarDB"
         url: /docs/3.5/backup-restore/
       - title: "Managing ScalarDB on Managed Kubernetes Services"
         url: /docs/3.5/scalar-kubernetes/manage-kubernetes/
@@ -853,7 +867,7 @@ versions:
         url: /docs/3.5/design/
       - title: "ScalarDB Supported Databases"
         url: /docs/3.5/scalardb-supported-databases/
-      - title: "Requirements in the Underlying Databases of ScalarDB"
+      - title: "Requirements and Recommendations for the Underlying Databases of ScalarDB"
         url: /docs/3.5/requirements/
       - title: "ScalarDB Benchmarks"
         url: /docs/3.5/scalardb-benchmarks/README/
@@ -921,7 +935,7 @@ versions:
     # Develop docs
     - title: "Develop"
       children:
-      - title: "Add ScalarDB to your build"
+      - title: "Add ScalarDB to Your Build"
         url: /docs/3.4/add-scalardb-to-your-build/
       - title: "Configuration Guides for ScalarDB"
         url: /docs/3.4/development-configurations/
@@ -942,18 +956,18 @@ versions:
         url: /docs/3.4/scalardb-server/
       - title: "Getting Started with Helm Charts (ScalarDB Server)"
         url: /docs/3.4/helm-charts/getting-started-scalardb/
-      - title: "Configure a custom values file for Scalar Helm Charts"
+      - title: "Configure a Custom Values File for Scalar Helm Charts"
         url: /docs/3.4/helm-charts/configure-custom-values-file/
-      - title: "Deploy Scalar products using Scalar Helm Charts"
+      - title: "Deploy Scalar Products Using Scalar Helm Charts"
         url: /docs/3.4/helm-charts/how-to-deploy-scalar-products/
-      - title: "Mount any files or volumes on Scalar product pods"
+      - title: "Mount Any Files or Volumes on Scalar Product Pods"
         url: /docs/3.4/helm-charts/mount-files-or-volumes-on-scalar-pods/
-      - title: "How to use Secret resources to pass the credentials as the environment variables into the properties file"
+      - title: "How to Use Secret Resources to Pass Credentials as Environment Variables into the Properties File"
         url: /docs/3.4/helm-charts/use-secret-for-credentials/
     # Manage docs
     - title: "Manage"
       children:
-      - title: "How to Backup and Restore Databases Used Through ScalarDB"
+      - title: "How to Back Up and Restore Databases Used Through ScalarDB"
         url: /docs/3.4/backup-restore/
       - title: "Managing ScalarDB on Managed Kubernetes Services"
         url: /docs/3.4/scalar-kubernetes/manage-kubernetes/
@@ -969,7 +983,7 @@ versions:
         url: /docs/3.4/design/
       - title: "ScalarDB Supported Databases"
         url: /docs/3.4/scalardb-supported-databases/
-      - title: "Requirements in the Underlying Databases of ScalarDB"
+      - title: "Requirements and Recommendations for the Underlying Databases of ScalarDB"
         url: /docs/3.4/requirements/
       - title: "ScalarDB Benchmarks"
         url: /docs/3.4/scalardb-benchmarks/README/