diff --git a/_config.yml b/_config.yml index 03ca8342..d90998a0 100644 --- a/_config.yml +++ b/_config.yml @@ -195,6 +195,19 @@ defaults: toc: true toc_sticky: true search: true + # 3.12 + - scope: + path: "docs/3.12" # Specifies the name of the folder where this version of docs are located. + # type: "" # Since this scope uses `collection_dir`, we do not need to specify the type here. + values: + layout: page # Specifies the type of template used from the "_layouts" folder. + read_time: false # Shows the average reading time for pages. + share: false # Shows social media buttons to share pages. + sidebar: # Shows side navigation content from `_data/navigation.yml`. + nav: "latest" # Add the version enclosed within quotation marks. If the docs in the navigation is for the latest version of the product, be sure to set `nav:` to `"latest"`. If the docs in the navigation is for a previous version of the product, be sure to set `nav:` to the product version number (e.g., `"3.8"`). That version number must match the set of docs for that product version in `_data/navigation.yml`. + toc: true + toc_sticky: true + search: false # 3.11 - scope: path: "docs/3.11" # Specifies the name of the folder where this version of docs are located. @@ -204,7 +217,7 @@ defaults: read_time: false # Shows the average reading time for pages. share: false # Shows social media buttons to share pages. sidebar: # Shows side navigation content from `_data/navigation.yml`. - nav: "latest" # Add the version enclosed within quotation marks. If the docs in the navigation is for the latest version of the product, be sure to set `nav:` to `"latest"`. If the docs in the navigation is for a previous version of the product, be sure to set `nav:` to the product version number (e.g., `"3.8"`). That version number must match the set of docs for that product version in `_data/navigation.yml`. + nav: "3.11" # Add the version enclosed within quotation marks. If the docs in the navigation is for the latest version of the product, be sure to set `nav:` to `"latest"`. If the docs in the navigation is for a previous version of the product, be sure to set `nav:` to the product version number (e.g., `"3.8"`). That version number must match the set of docs for that product version in `_data/navigation.yml`. toc: true toc_sticky: true search: false diff --git a/_data/navigation.yml b/_data/navigation.yml index b371445e..b8b440d4 100644 --- a/_data/navigation.yml +++ b/_data/navigation.yml @@ -37,8 +37,10 @@ editions: versions: - version-top-title: "" # This title is taken from the `version_label` in `_data/ui-text.yml`. version-children: - - version-title: "3.11 (latest)" + - version-title: "3.12 (latest)" version-url: /docs/latest/getting-started-with-scalardb/ + - version-title: "3.11" + version-url: /docs/3.11/getting-started-with-scalardb/ - version-title: "3.10" version-url: /docs/3.10/getting-started-with-scalardb/ - version-title: "3.9" @@ -62,9 +64,9 @@ versions: # After that, add a new scope in `_config.yml` to include an item that provides the proper versioned navigation to the site when someone visits the page (i.e., make sure people who visit a version 3.8 doc are shown a side navigation that includes only 3.8 docs.) "latest": - - title: "⬅ ScalarDB Enterprise docs home" + - title: "⬅ ScalarDB Enterprise docs home" url: /docs/ # Don't change this URL. This links back to the parent product home page. 
- - title: "ScalarDB 3.11 Enterprise" + - title: "ScalarDB 3.12 Enterprise" children: # Get Started docs - title: "Get Started" @@ -88,9 +90,9 @@ versions: - title: "Getting Started with the ScalarDB Cluster .NET Client SDK" url: /docs/latest/scalardb-cluster-dotnet-client-sdk/ # Samples docs - - title: "Samples" + - title: "Samples" children: - - title: "ScalarDB Samples" + - title: "ScalarDB Samples" url: /docs/latest/scalardb-samples/README/ # - title: "ScalarDB Samples" # url: /docs/latest/scalardb-samples/scalardb-sample/README/ @@ -171,7 +173,7 @@ versions: # - title: "Migrate" # children: # - title: "" - # url: + # url: # Reference docs - title: "Reference" children: @@ -211,8 +213,158 @@ versions: - title: "Release Support Policy" url: /docs/releases/release-support-policy/ +"3.11": + - title: "⬅ ScalarDB Enterprise docs home" + url: /docs/ # Don't change this URL. This links back to the parent product home page. + - title: "ScalarDB 3.11 Enterprise" + children: + # Get Started docs + - title: "Get Started" + children: + # - title: "Getting Started with ScalarDB on Cassandra" + # url: /docs/3.11/getting-started-with-scalardb-on-cassandra/ + # - title: "Getting Started with ScalarDB on Cosmos DB" + # url: /docs/3.11/getting-started-with-scalardb-on-cosmosdb/ + # - title: "Getting Started with ScalarDB on DynamoDB" + # url: /docs/3.11/getting-started-with-scalardb-on-dynamodb/ + # - title: "Getting Started with ScalarDB on JDBC databases" + # url: /docs/3.11/getting-started-with-scalardb-on-jdbc/ + - title: "Getting Started with ScalarDB" + url: /docs/3.11/getting-started-with-scalardb/ + - title: "Getting Started with ScalarDB by Using Kotlin" + url: /docs/3.11/getting-started-with-scalardb-by-using-kotlin/ + - title: "Getting Started with ScalarDB Analytics with PostgreSQL" + url: /docs/3.11/scalardb-analytics-postgresql/getting-started/ + - title: "Getting Started with ScalarDB Cluster" + url: /docs/3.11/scalardb-cluster/getting-started-with-scalardb-cluster-overview/ + - title: "Getting Started with the ScalarDB Cluster .NET Client SDK" + url: /docs/3.11/scalardb-cluster-dotnet-client-sdk/ + # Samples docs + - title: "Samples" + children: + - title: "ScalarDB Samples" + url: /docs/3.11/scalardb-samples/README/ + # - title: "ScalarDB Samples" + # url: /docs/3.11/scalardb-samples/scalardb-sample/README/ + # - title: "Microservice Transaction Sample" + # url: /docs/3.11/scalardb-samples/microservice-transaction-sample/README/ + # - title: "Multi-Storage Transaction Sample" + # url: /docs/3.11/scalardb-samples/multi-storage-transaction-sample/README/ + # - title: "ScalarDB GraphQL Sample" + # url: /docs/3.11/scalardb-samples/scalardb-graphql-sample/README/ + # - title: "Spring Data Multi-Storage Transaction Sample" + # url: /docs/3.11/scalardb-samples/spring-data-multi-storage-transaction-sample/README/ + # - title: "Spring Data Sample" + # url: /docs/3.11/scalardb-samples/spring-data-sample/README/ + # Develop docs + - title: "Develop" + children: + - title: "Add ScalarDB to Your Build" + url: /docs/3.11/add-scalardb-to-your-build/ + - title: "Add ScalarDB SQL to Your Build" + url: /docs/3.11/scalardb-sql/add-scalardb-sql-to-your-build/ + - title: "Developer Guides for ScalarDB" + url: /docs/3.11/guides/ + - title: "Configuration Guides for ScalarDB" + url: /docs/3.11/development-configurations/ + - title: "ScalarDB Schema Loader" + url: /docs/3.11/schema-loader/ + - title: "Importing Existing Tables to ScalarDB by Using ScalarDB Schema Loader" + url: 
/docs/3.11/schema-loader-import/ + - title: "ScalarDB FDW for ScalarDB Analytics with PostgreSQL" + url: /docs/3.11/scalardb-analytics-postgresql/scalardb-fdw/ + - title: "Schema Importer for ScalarDB Analytics with PostgreSQL" + url: /docs/3.11/scalardb-analytics-postgresql/schema-importer/ + # - title: "Export Function for ScalarDB Data Loader" # May be added in the near future. + # url: /docs/3.11/scalardb-data-loader/getting-started-export/ + # - title: "Import Function for ScalarDB Data Loader" # May be added in the near future. + # url: /docs/3.11/scalardb-data-loader/getting-started-import/ + - title: "ScalarDB Auth with ScalarDB SQL" + url : /docs/3.11/scalardb-cluster/scalardb-auth-with-sql/ + - title: "How to Run ScalarDB GraphQL Server" + url: /docs/3.11/scalardb-graphql/how-to-run-server/ + - title: "How to Run Two-Phase Commit Transaction by Using ScalarDB GraphQL" + url: /docs/3.11/scalardb-graphql/how-to-run-two-phase-commit-transaction/ + - title: "ScalarDB SQL Command Line Interface" + url: /docs/3.11/scalardb-sql/command-line-interface/ + # Deploy docs + - title: "Deploy" + children: + - title: "Deploying ScalarDB on Managed Kubernetes Services" + url: /docs/3.11/scalar-kubernetes/deploy-kubernetes/ + - title: "Getting Started with Scalar Helm Charts" + url: /docs/3.11/helm-charts/getting-started-scalar-helm-charts/ + - title: "Set Up ScalarDB Cluster on Kubernetes by Using a Helm Chart" + url: /docs/3.11/scalardb-cluster/setup-scalardb-cluster-on-kubernetes-by-using-helm-chart/ + - title: "Configure a Custom Values File for Scalar Helm Charts" + url: /docs/3.11/helm-charts/configure-custom-values-file/ + - title: "Deploy Scalar Products Using Scalar Helm Charts" + url: /docs/3.11/helm-charts/how-to-deploy-scalar-products/ + - title: "Mount Any Files or Volumes on Scalar Product Pods" + url: /docs/3.11/helm-charts/mount-files-or-volumes-on-scalar-pods/ + - title: "How to Use Secret Resources to Pass Credentials as Environment Variables into the Properties File" + url: /docs/3.11/helm-charts/use-secret-for-credentials/ + - title: "How to Install ScalarDB Analytics with PostgreSQL in Your Local Environment by Using Docker" + url: /docs/3.11/scalardb-analytics-postgresql/installation/ + - title: "ScalarDB Cluster Standalone Mode" + url: /docs/3.11/scalardb-cluster/standalone-mode/ + - title: "ScalarDB GraphQL Deployment Guide on AWS" + url: /docs/3.11/scalardb-graphql/aws-deployment-guide/ + - title: "ScalarDB SQL Server" + url: /docs/3.11/scalardb-sql/sql-server/ + # Manage docs + - title: "Manage" + children: + - title: "How to Back Up and Restore Databases Used Through ScalarDB" + url: /docs/3.11/backup-restore/ + - title: "Managing ScalarDB on Managed Kubernetes Services" + url: /docs/3.11/scalar-kubernetes/manage-kubernetes/ + # Migrate docs + # - title: "Migrate" + # children: + # - title: "" + # url: + # Reference docs + - title: "Reference" + children: + - title: "ScalarDB Design Document" + url: /docs/3.11/design/ + - title: "ScalarDB Supported Databases" + url: /docs/3.11/scalardb-supported-databases/ + - title: "Requirements and Recommendations for the Underlying Databases of ScalarDB" + url: /docs/3.11/requirements/ + - title: "Storage Abstraction and API Guide" + url: /docs/3.11/storage-abstraction/ + - title: "ScalarDB Benchmarks" + url: /docs/3.11/scalardb-benchmarks/README/ + - title: "ScalarDB Cluster" + url: /docs/3.11/scalardb-cluster/ + - title: "ScalarDB SQL Grammar" + url: /docs/3.11/scalardb-sql/grammar/ + # Release docs and notes + - title: "Releases" 
+ children: + - title: "Release Notes" + url: /docs/releases/ + # - title: "v3.10" + # url: /docs/releases/release-3.10/ + # - title: "v3.9" + # url: /docs/releases/release-3.9/ + # - title: "v3.8" + # url: /docs/releases/release-3.8/ + # - title: "v3.7" + # url: /docs/releases/release-3.7/ + # - title: "v3.6" + # url: /docs/releases/release-3.6/ + # - title: "v3.5" + # url: /docs/releases/release-3.5/ + # - title: "v3.4" + # url: /docs/releases/release-3.4/ + - title: "Release Support Policy" + url: /docs/releases/release-support-policy/ + "3.10": - - title: "⬅ ScalarDB Enterprise docs home" + - title: "⬅ ScalarDB Enterprise docs home" url: /docs/ # Don't change this URL. This links back to the parent product home page. - title: "ScalarDB 3.10 Enterprise" children: @@ -236,9 +388,9 @@ versions: - title: "Getting Started with ScalarDB Cluster" url: /docs/3.10/scalardb-cluster/getting-started-with-scalardb-cluster-overview/ # Samples docs - - title: "Samples" + - title: "Samples" children: - - title: "ScalarDB Samples" + - title: "ScalarDB Samples" url: /docs/3.10/scalardb-samples/README/ # - title: "ScalarDB Samples" # url: /docs/3.10/scalardb-samples/scalardb-sample/README/ @@ -354,7 +506,7 @@ versions: url: /docs/releases/release-support-policy/ "3.9": - - title: "⬅ ScalarDB Enterprise docs home" + - title: "⬅ ScalarDB Enterprise docs home" url: /docs/ # Don't change this URL. This links back to the parent product home page. - title: "ScalarDB 3.9 Enterprise" children: @@ -378,9 +530,9 @@ versions: - title: "Getting Started with ScalarDB Cluster" url: /docs/3.9/scalardb-cluster/getting-started-with-scalardb-cluster-overview/ # Samples docs - - title: "Samples" + - title: "Samples" children: - - title: "ScalarDB Samples" + - title: "ScalarDB Samples" url: /docs/3.9/scalardb-samples/README/ # - title: "ScalarDB Samples" # url: /docs/3.9/scalardb-samples/scalardb-sample/README/ @@ -496,7 +648,7 @@ versions: url: /docs/releases/release-support-policy/ "3.8": - - title: "⬅ ScalarDB Enterprise docs home" + - title: "⬅ ScalarDB Enterprise docs home" url: /docs/ # Don't change this URL. This links back to the parent product home page. - title: "ScalarDB 3.8 Enterprise" children: @@ -522,9 +674,9 @@ versions: - title: "Getting Started with ScalarDB SQL" url: /docs/3.8/scalardb-sql/getting-started-with-sql/ # Samples docs - - title: "Samples" + - title: "Samples" children: - - title: "ScalarDB Samples" + - title: "ScalarDB Samples" url: /docs/3.8/scalardb-samples/README/ # - title: "ScalarDB Samples" # url: /docs/3.8/scalardb-samples/scalardb-sample/README/ @@ -638,7 +790,7 @@ versions: url: /docs/releases/release-support-policy/ "3.7": - - title: "⬅ ScalarDB Enterprise docs home" + - title: "⬅ ScalarDB Enterprise docs home" url: /docs/ # Don't change this URL. This links back to the parent product home page. - title: "ScalarDB 3.7 Enterprise" children: @@ -664,9 +816,9 @@ versions: - title: "Getting Started with ScalarDB SQL" url: /docs/3.7/scalardb-sql/getting-started-with-sql/ # Samples docs - - title: "Samples" + - title: "Samples" children: - - title: "ScalarDB Samples" + - title: "ScalarDB Samples" url: /docs/3.7/scalardb-samples/README/ # - title: "ScalarDB Samples" # url: /docs/3.7/scalardb-samples/scalardb-sample/README/ @@ -778,7 +930,7 @@ versions: url: /docs/releases/release-support-policy/ "3.6": - - title: "⬅ ScalarDB Enterprise docs home" + - title: "⬅ ScalarDB Enterprise docs home" url: /docs/ # Don't change this URL. This links back to the parent product home page. 
- title: "ScalarDB 3.6 Enterprise" children: @@ -804,9 +956,9 @@ versions: - title: "Getting Started with ScalarDB SQL" url: /docs/3.6/scalardb-sql/getting-started-with-sql/ # Samples docs - - title: "Samples" + - title: "Samples" children: - - title: "ScalarDB Samples" + - title: "ScalarDB Samples" url: /docs/3.6/scalardb-samples/README/ # - title: "ScalarDB Samples" # url: /docs/3.6/scalardb-samples/scalardb-sample/README/ @@ -918,7 +1070,7 @@ versions: url: /docs/releases/release-support-policy/ "3.5": - - title: "⬅ ScalarDB Enterprise docs home" + - title: "⬅ ScalarDB Enterprise docs home" url: /docs/ # Don't change this URL. This links back to the parent product home page. - title: "ScalarDB 3.5 Enterprise" children: @@ -940,9 +1092,9 @@ versions: - title: "Getting Started with ScalarDB GraphQL" url: /docs/3.5/scalardb-graphql/getting-started-with-scalardb-graphql/ # Samples docs - - title: "Samples" + - title: "Samples" children: - - title: "ScalarDB Samples" + - title: "ScalarDB Samples" url: /docs/3.5/scalardb-samples/README/ # - title: "ScalarDB Samples" # url: /docs/3.5/scalardb-samples/scalardb-sample/README/ @@ -1048,7 +1200,7 @@ versions: url: /docs/releases/release-support-policy/ "3.4": - - title: "⬅ ScalarDB Enterprise docs home" + - title: "⬅ ScalarDB Enterprise docs home" url: /docs/ # Don't change this URL. This links back to the parent product home page. - title: "ScalarDB 3.4 Enterprise" children: @@ -1068,9 +1220,9 @@ versions: - title: "Getting Started with ScalarDB" url: /docs/3.4/getting-started-with-scalardb/ # Samples docs - - title: "Samples" + - title: "Samples" children: - - title: "ScalarDB Samples" + - title: "ScalarDB Samples" url: /docs/3.4/scalardb-samples/README/ # - title: "ScalarDB Samples" # url: /docs/3.4/scalardb-samples/scalardb-sample/README/ diff --git a/docs/3.12/add-scalardb-to-your-build.md b/docs/3.12/add-scalardb-to-your-build.md new file mode 100644 index 00000000..85d8e7fa --- /dev/null +++ b/docs/3.12/add-scalardb-to-your-build.md @@ -0,0 +1,37 @@ +# Add ScalarDB to Your Build + +The ScalarDB library is available on the [Maven Central Repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb). You can add the library as a build dependency to your application by using Gradle or Maven. + +## Configure your application based on your build tool + +Select your build tool, and follow the instructions to add the build dependency for ScalarDB to your application. + +
+
+ + +
+ +
+
+To add the build dependency for ScalarDB by using Gradle, add the following to `build.gradle` in your application, replacing `<VERSION>` with the version of ScalarDB that you want to use:
+
+```gradle
+dependencies {
+    implementation 'com.scalar-labs:scalardb:<VERSION>'
+}
+```
+
+
+
+To add the build dependency for ScalarDB by using Maven, add the following to `pom.xml` in your application, replacing `<VERSION>` with the version of ScalarDB that you want to use:
+
+```xml
+<dependency>
+  <groupId>com.scalar-labs</groupId>
+  <artifactId>scalardb</artifactId>
+  <version><VERSION></version>
+</dependency>
+```
+
+
diff --git a/docs/3.12/api-guide.md b/docs/3.12/api-guide.md new file mode 100644 index 00000000..2e0cb99b --- /dev/null +++ b/docs/3.12/api-guide.md @@ -0,0 +1,1176 @@ +# ScalarDB Java API Guide + +The ScalarDB Java API is mainly composed of the Administrative API and Transactional API. This guide briefly explains what kinds of APIs exist, how to use them, and related topics like how to handle exceptions. + +## Administrative API + +This section explains how to execute administrative operations programmatically by using the Administrative API in ScalarDB. + +{% capture notice--info %} +**Note** + +Another method for executing administrative operations is to use [Schema Loader](schema-loader.md). +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +### Get a `DistributedTransactionAdmin` instance + +You first need to get a `DistributedTransactionAdmin` instance to execute administrative operations. + +To get a `DistributedTransactionAdmin` instance, you can use `TransactionFactory` as follows: + +```java +TransactionFactory transactionFactory = TransactionFactory.create(""); +DistributedTransactionAdmin admin = transactionFactory.getTransactionAdmin(); +``` + +For details about configurations, see [ScalarDB Configurations](configurations.md). + +After you have executed all administrative operations, you should close the `DistributedTransactionAdmin` instance as follows: + +```java +admin.close(); +``` + +### Create a namespace + +Before creating tables, namespaces must be created since a table belongs to one namespace. + +You can create a namespace as follows: + +```java +// Create the namespace "ns". If the namespace already exists, an exception will be thrown. +admin.createNamespace("ns"); + +// Create the namespace only if it does not already exist. +boolean ifNotExists = true; +admin.createNamespace("ns", ifNotExists); + +// Create the namespace with options. +Map options = ...; +admin.createNamespace("ns", options); +``` + +#### Creation options + +In the creation operations, like creating a namespace and creating a table, you can specify options that are maps of option names and values (`Map`). By using the options, you can set storage adapter–specific configurations. + +Select your database to see the options available: + +
+
+ + + + +
+ +
+
+| Name                 | Description                                                                             | Default          |
+|----------------------|-----------------------------------------------------------------------------------------|------------------|
+| replication-strategy | Cassandra replication strategy. Must be `SimpleStrategy` or `NetworkTopologyStrategy`.   | `SimpleStrategy` |
+| compaction-strategy  | Cassandra compaction strategy. Must be `LCS`, `STCS`, or `TWCS`.                         | `STCS`           |
+| replication-factor   | Cassandra replication factor.                                                             | 1                |
+
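+
+The following is a minimal sketch of how these Cassandra options can be passed to `createNamespace()`. The option values shown (`NetworkTopologyStrategy` and a replication factor of `3`) are illustrative examples, not recommendations:
+
+```java
+// Pass Cassandra-specific options when creating the namespace "ns".
+Map<String, String> options = new HashMap<>();
+options.put("replication-strategy", "NetworkTopologyStrategy");
+options.put("replication-factor", "3");
+admin.createNamespace("ns", options);
+```
+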
+
+ +| Name | Description | Default | +|------------|-----------------------------------------------------|---------| +| ru | Base resource unit. | 400 | +| no-scaling | Disable auto-scaling for Cosmos DB for NoSQL. | false | + +
+
+ +| Name | Description | Default | +|------------|-----------------------------------------|---------| +| no-scaling | Disable auto-scaling for DynamoDB. | false | +| no-backup | Disable continuous backup for DynamoDB. | false | +| ru | Base resource unit. | 10 | + +
+
+ +No options are available for JDBC databases. + +
+
+ +### Create a table + +When creating a table, you should define the table metadata and then create the table. + +To define the table metadata, you can use `TableMetadata`. The following shows how to define the columns, partition key, clustering key including clustering orders, and secondary indexes of a table: + +```java +// Define the table metadata. +TableMetadata tableMetadata = + TableMetadata.newBuilder() + .addColumn("c1", DataType.INT) + .addColumn("c2", DataType.TEXT) + .addColumn("c3", DataType.BIGINT) + .addColumn("c4", DataType.FLOAT) + .addColumn("c5", DataType.DOUBLE) + .addPartitionKey("c1") + .addClusteringKey("c2", Scan.Ordering.Order.DESC) + .addClusteringKey("c3", Scan.Ordering.Order.ASC) + .addSecondaryIndex("c4") + .build(); +``` + +For details about the data model of ScalarDB, see [Data Model](design.md#data-model). + +Then, create a table as follows: + +```java +// Create the table "ns.tbl". If the table already exists, an exception will be thrown. +admin.createTable("ns", "tbl", tableMetadata); + +// Create the table only if it does not already exist. +boolean ifNotExists = true; +admin.createTable("ns", "tbl", tableMetadata, ifNotExists); + +// Create the table with options. +Map options = ...; +admin.createTable("ns", "tbl", tableMetadata, options); +``` + +### Create a secondary index + +You can create a secondary index as follows: + +```java +// Create a secondary index on column "c5" for table "ns.tbl". If a secondary index already exists, an exception will be thrown. +admin.createIndex("ns", "tbl", "c5"); + +// Create the secondary index only if it does not already exist. +boolean ifNotExists = true; +admin.createIndex("ns", "tbl", "c5", ifNotExists); + +// Create the secondary index with options. +Map options = ...; +admin.createIndex("ns", "tbl", "c5", options); +``` + +### Add a new column to a table + +You can add a new, non-partition key column to a table as follows: + +```java +// Add a new column "c6" with the INT data type to the table "ns.tbl". +admin.addNewColumnToTable("ns", "tbl", "c6", DataType.INT) +``` + +{% capture notice--warning %} +**Attention** + +You should carefully consider adding a new column to a table because the execution time may vary greatly depending on the underlying storage. Please plan accordingly and consider the following, especially if the database runs in production: + +- **For Cosmos DB for NoSQL and DynamoDB:** Adding a column is almost instantaneous as the table schema is not modified. Only the table metadata stored in a separate table is updated. +- **For Cassandra:** Adding a column will only update the schema metadata and will not modify the existing schema records. The cluster topology is the main factor for the execution time. Changes to the schema metadata are shared to each cluster node via a gossip protocol. Because of this, the larger the cluster, the longer it will take for all nodes to be updated. +- **For relational databases (MySQL, Oracle, etc.):** Adding a column shouldn't take a long time to execute. +{% endcapture %} + +
<div class="notice--warning">{{ notice--warning | markdownify }}</div>
+ +### Truncate a table + +You can truncate a table as follows: + +```java +// Truncate the table "ns.tbl". +admin.truncateTable("ns", "tbl"); +``` + +### Drop a secondary index + +You can drop a secondary index as follows: + +```java +// Drop the secondary index on column "c5" from table "ns.tbl". If the secondary index does not exist, an exception will be thrown. +admin.dropIndex("ns", "tbl", "c5"); + +// Drop the secondary index only if it exists. +boolean ifExists = true; +admin.dropIndex("ns", "tbl", "c5", ifExists); +``` + +### Drop a table + +You can drop a table as follows: + +```java +// Drop the table "ns.tbl". If the table does not exist, an exception will be thrown. +admin.dropTable("ns", "tbl"); + +// Drop the table only if it exists. +boolean ifExists = true; +admin.dropTable("ns", "tbl", ifExists); +``` + +### Drop a namespace + +You can drop a namespace as follows: + +```java +// Drop the namespace "ns". If the namespace does not exist, an exception will be thrown. +admin.dropNamespace("ns"); + +// Drop the namespace only if it exists. +boolean ifExists = true; +admin.dropNamespace("ns", ifExists); +``` + +### Get the tables of a namespace + +You can get the tables of a namespace as follows: + +```java +// Get the tables of the namespace "ns". +Set tables = admin.getNamespaceTableNames("ns"); +``` + +### Get table metadata + +You can get table metadata as follows: + +```java +// Get the table metadata for "ns.tbl". +TableMetadata tableMetadata = admin.getTableMetadata("ns", "tbl"); +``` +### Repair a table + +You can repair the table metadata of an existing table as follows: + +```java +// Repair the table "ns.tbl" with options. +TableMetadata tableMetadata = + TableMetadata.newBuilder() + ... + .build(); +Map options = ...; +admin.repairTable("ns", "tbl", tableMetadata, options); +``` + +### Specify operations for the Coordinator table + +The Coordinator table is used by the [Transactional API](#transactional-api) to track the statuses of transactions. + +When using a transaction manager, you must create the Coordinator table to execute transactions. In addition to creating the table, you can truncate and drop the Coordinator table. + +#### Create the Coordinator table + +You can create the Coordinator table as follows: + +```java +// Create the Coordinator table. +admin.createCoordinatorTables(); + +// Create the Coordinator table only if one does not already exist. +boolean ifNotExist = true; +admin.createCoordinatorTables(ifNotExist); + +// Create the Coordinator table with options. +Map options = ...; +admin.createCoordinatorTables(options); +``` + +#### Truncate the Coordinator table + +You can truncate the Coordinator table as follows: + +```java +// Truncate the Coordinator table. +admin.truncateCoordinatorTables(); +``` + +#### Drop the Coordinator table + +You can drop the Coordinator table as follows: + +```java +// Drop the Coordinator table. +admin.dropCoordinatorTables(); + +// Drop the Coordinator table if one exist. +boolean ifExist = true; +admin.dropCoordinatorTables(ifExist); +``` + +### Import a table + +You can import an existing table to ScalarDB as follows: + +```java +// Import the table "ns.tbl". If the table is already managed by ScalarDB, the target table does not +// exist, or the table does not meet the requirements of the ScalarDB table, an exception will be thrown. 
+admin.importTable("ns", "tbl", options); +``` + +{% capture notice--warning %} +**Attention** + +You should carefully plan to import a table to ScalarDB in production because it will add transaction metadata columns to your database tables and the ScalarDB metadata tables. In this case, there would also be several differences between your database and ScalarDB, as well as some limitations. For details, see [Importing Existing Tables to ScalarDB by Using ScalarDB Schema Loader](./schema-loader-import.md). + +{% endcapture %} + +
<div class="notice--warning">{{ notice--warning | markdownify }}</div>
+ +## Transactional API + +This section explains how to execute transactional operations by using the Transactional API in ScalarDB. + +### Get a `DistributedTransactionManager` instance + +You first need to get a `DistributedTransactionManager` instance to execute transactional operations. + +To get a `DistributedTransactionManager` instance, you can use `TransactionFactory` as follows: + +```java +TransactionFactory transactionFactory = TransactionFactory.create(""); +DistributedTransactionManager transactionManager = transactionFactory.getTransactionManager(); +``` + +After you have executed all transactional operations, you should close the `DistributedTransactionManager` instance as follows: + +```java +transactionManager.close(); +``` + +### Begin or start a transaction + +Before executing transactional CRUD operations, you need to begin or start a transaction. + +You can begin a transaction as follows: + +```java +// Begin a transaction. +DistributedTransaction transaction = transactionManager.begin(); +``` + +Or, you can start a transaction as follows: + +```java +// Start a transaction. +DistributedTransaction transaction = transactionManager.start(); +``` + +Alternatively, you can use the `begin` method for a transaction by specifying a transaction ID as follows: + +```java +// Begin a transaction with specifying a transaction ID. +DistributedTransaction transaction = transactionManager.begin(""); +``` + +Or, you can use the `start` method for a transaction by specifying a transaction ID as follows: + +```java +// Start a transaction with specifying a transaction ID. +DistributedTransaction transaction = transactionManager.start(""); +``` + +{% capture notice--info %} +**Note** + +Specifying a transaction ID is useful when you want to link external systems to ScalarDB. Otherwise, you should use the `begin()` method or the `start()` method. + +When you specify a transaction ID, make sure you specify a unique ID (for example, UUID v4) throughout the system since ScalarDB depends on the uniqueness of transaction IDs for correctness. +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
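+
+For example, a minimal sketch that generates a random (version 4) UUID to use as the transaction ID might look like this:
+
+```java
+// Use a random UUID (v4) as the transaction ID to keep it unique across the system.
+String txId = UUID.randomUUID().toString();
+DistributedTransaction transaction = transactionManager.begin(txId);
+```
+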
+ +### Join a transaction + +Joining a transaction is particularly useful in a stateful application where a transaction spans multiple client requests. In such a scenario, the application can start a transaction during the first client request. Then, in subsequent client requests, the application can join the ongoing transaction by using the `join()` method. + +You can join an ongoing transaction that has already begun by specifying the transaction ID as follows: + +```java +// Join a transaction. +DistributedTransaction transaction = transactionManager.join(""); +``` + +{% capture notice--info %} +**Note** + +To get the transaction ID with `getId()`, you can specify the following: + +```java +tx.getId(); +``` +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
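+
+The following is a sketch of the multi-request pattern described above. The request-handling flow and the way the transaction ID is passed between requests are assumptions for illustration, not part of ScalarDB:
+
+```java
+// First client request: begin a transaction and hand its ID back to the client.
+DistributedTransaction transaction = transactionManager.begin();
+String txId = transaction.getId();
+// ... execute CRUD operations for this request ...
+
+// A subsequent client request: join the ongoing transaction by its ID.
+DistributedTransaction joined = transactionManager.join(txId);
+// ... execute more CRUD operations, then commit ...
+joined.commit();
+```
+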
+ +### Resume a transaction + +Resuming a transaction is particularly useful in a stateful application where a transaction spans multiple client requests. In such a scenario, the application can start a transaction during the first client request. Then, in subsequent client requests, the application can resume the ongoing transaction by using the `resume()` method. + +You can resume an ongoing transaction that you have already begun by specifying a transaction ID as follows: + +```java +// Resume a transaction. +DistributedTransaction transaction = transactionManager.resume(""); +``` + +{% capture notice--info %} +**Note** + +To get the transaction ID with `getId()`, you can specify the following: + +```java +tx.getId(); +``` +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +### Implement CRUD operations + +The following sections describe key construction and CRUD operations. + +{% capture notice--info %} +**Note** + +Although all the builders of the CRUD operations can specify consistency by using the `consistency()` methods, those methods are ignored. Instead, the `LINEARIZABLE` consistency level is always used in transactions. +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +#### Key construction + +Most CRUD operations need to specify `Key` objects (partition-key, clustering-key, etc.). So, before moving on to CRUD operations, the following explains how to construct a `Key` object. + +For a single column key, you can use `Key.of()` methods to construct the key as follows: + +```java +// For a key that consists of a single column of INT. +Key key1 = Key.ofInt("col1", 1); + +// For a key that consists of a single column of BIGINT. +Key key2 = Key.ofBigInt("col1", 100L); + +// For a key that consists of a single column of DOUBLE. +Key key3 = Key.ofDouble("col1", 1.3d); + +// For a key that consists of a single column of TEXT. +Key key4 = Key.ofText("col1", "value"); +``` + +For a key that consists of two to five columns, you can use the `Key.of()` method to construct the key as follows. Similar to `ImmutableMap.of()` in Guava, you need to specify column names and values in turns: + +```java +// For a key that consists of two to five columns. +Key key1 = Key.of("col1", 1, "col2", 100L); +Key key2 = Key.of("col1", 1, "col2", 100L, "col3", 1.3d); +Key key3 = Key.of("col1", 1, "col2", 100L, "col3", 1.3d, "col4", "value"); +Key key4 = Key.of("col1", 1, "col2", 100L, "col3", 1.3d, "col4", "value", "col5", false); +``` + +For a key that consists of more than five columns, we can use the builder to construct the key as follows: + +```java +// For a key that consists of more than five columns. +Key key = Key.newBuilder() + .addInt("col1", 1) + .addBigInt("col2", 100L) + .addDouble("col3", 1.3d) + .addText("col4", "value") + .addBoolean("col5", false) + .addInt("col6", 100) + .build(); +``` + +#### `Get` operation + +`Get` is an operation to retrieve a single record specified by a primary key. + +You need to create a `Get` object first, and then you can execute the object by using the `transaction.get()` method as follows: + +```java +// Create a `Get` operation. +Key partitionKey = Key.ofInt("c1", 10); +Key clusteringKey = Key.of("c2", "aaa", "c3", 100L); + +Get get = + Get.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .projections("c1", "c2", "c3", "c4") + .build(); + +// Execute the `Get` operation. +Optional result = transaction.get(get); +``` + +You can also specify projections to choose which columns are returned. + +##### Handle `Result` objects + +The `Get` operation and `Scan` operation return `Result` objects. The following shows how to handle `Result` objects. + +You can get a column value of a result by using `get("")` methods as follows: + +```java +// Get the BOOLEAN value of a column. +boolean booleanValue = result.getBoolean(""); + +// Get the INT value of a column. +int intValue = result.getInt(""); + +// Get the BIGINT value of a column. +long bigIntValue = result.getBigInt(""); + +// Get the FLOAT value of a column. +float floatValue = result.getFloat(""); + +// Get the DOUBLE value of a column. +double doubleValue = result.getDouble(""); + +// Get the TEXT value of a column. +String textValue = result.getText(""); + +// Get the BLOB value of a column as a `ByteBuffer`. +ByteBuffer blobValue = result.getBlob(""); + +// Get the BLOB value of a column as a `byte` array. +byte[] blobValueAsBytes = result.getBlobAsBytes(""); +``` + +And if you need to check if a value of a column is null, you can use the `isNull("")` method. + +``` java +// Check if a value of a column is null. 
+boolean isNull = result.isNull(""); +``` + +For more details, see the `Result` page in the [Javadoc](https://javadoc.io/doc/com.scalar-labs/scalardb/latest/index.html) of the version of ScalarDB that you're using. + +##### Execute `Get` by using a secondary index + +You can execute a `Get` operation by using a secondary index. + +Instead of specifying a partition key, you can specify an index key (indexed column) to use a secondary index as follows: + +```java +// Create a `Get` operation by using a secondary index. +Key indexKey = Key.ofFloat("c4", 1.23F); + +Get get = + Get.newBuilder() + .namespace("ns") + .table("tbl") + .indexKey(indexKey) + .projections("c1", "c2", "c3", "c4") + .build(); + +// Execute the `Get` operation. +Optional result = transaction.get(get); +``` + +{% capture notice--info %} +**Note** + +If the result has more than one record, `transaction.get()` will throw an exception. If you want to handle multiple results, see [Execute `Scan` by using a secondary index](#execute-scan-by-using-a-secondary-index). + +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +#### `Scan` operation + +`Scan` is an operation to retrieve multiple records within a partition. You can specify clustering-key boundaries and orderings for clustering-key columns in `Scan` operations. + +You need to create a `Scan` object first, and then you can execute the object by using the `transaction.scan()` method as follows: + +```java +// Create a `Scan` operation. +Key partitionKey = Key.ofInt("c1", 10); +Key startClusteringKey = Key.of("c2", "aaa", "c3", 100L); +Key endClusteringKey = Key.of("c2", "aaa", "c3", 300L); + +Scan scan = + Scan.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(partitionKey) + .start(startClusteringKey, true) // Include startClusteringKey + .end(endClusteringKey, false) // Exclude endClusteringKey + .projections("c1", "c2", "c3", "c4") + .orderings(Scan.Ordering.desc("c2"), Scan.Ordering.asc("c3")) + .limit(10) + .build(); + +// Execute the `Scan` operation. +List results = transaction.scan(scan); +``` + +You can omit the clustering-key boundaries or specify either a `start` boundary or an `end` boundary. If you don't specify `orderings`, you will get results ordered by the clustering order that you defined when creating the table. + +In addition, you can specify `projections` to choose which columns are returned and use `limit` to specify the number of records to return in `Scan` operations. + +##### Execute `Scan` by using a secondary index + +You can execute a `Scan` operation by using a secondary index. + +Instead of specifying a partition key, you can specify an index key (indexed column) to use a secondary index as follows: + +```java +// Create a `Scan` operation by using a secondary index. +Key indexKey = Key.ofFloat("c4", 1.23F); + +Scan scan = + Scan.newBuilder() + .namespace("ns") + .table("tbl") + .indexKey(indexKey) + .projections("c1", "c2", "c3", "c4") + .limit(10) + .build(); + +// Execute the `Scan` operation. +List results = transaction.scan(scan); +``` + +{% capture notice--info %} +**Note** + +You can't specify clustering-key boundaries and orderings in `Scan` by using a secondary index. +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+##### Execute cross-partition `Scan` without specifying a partition key to retrieve all the records of a table
+
+You can execute a `Scan` operation across all partitions, which we call *cross-partition scan*, without specifying a partition key by enabling the following configuration in the ScalarDB properties file:
+
+```properties
+scalar.db.cross_partition_scan.enabled=true
+```
+
+{% capture notice--warning %}
+**Attention**
+
+For non-JDBC databases, we do not recommend enabling cross-partition scan with the `SERIALIZABLE` isolation level because transactions could be executed at a lower isolation level (that is, `SNAPSHOT`). When using non-JDBC databases, use cross-partition scan at your own risk only if consistency does not matter for your transactions.
+{% endcapture %}
+
<div class="notice--warning">{{ notice--warning | markdownify }}</div>
+ +Instead of calling the `partitionKey()` method in the builder, you can call the `all()` method to scan a table without specifying a partition key as follows: + +```java +// Create a `Scan` operation without specifying a partition key. +Scan scan = + Scan.newBuilder() + .namespace("ns") + .table("tbl") + .all() + .projections("c1", "c2", "c3", "c4") + .limit(10) + .build(); + +// Execute the `Scan` operation. +List results = transaction.scan(scan); +``` + +{% capture notice--info %} +**Note** + +You can't specify any filtering conditions and orderings in cross-partition `Scan` except for when using JDBC databases. For details on how to use cross-partition `Scan` with filtering or ordering for JDBC databases, see [Execute cross-partition `Scan` with filtering and ordering](#execute-cross-partition-scan-with-filtering-and-ordering). +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +##### Execute cross-partition `Scan` with filtering and ordering + +By enabling the cross-partition scan option with filtering and ordering for JDBC databases as follows, you can execute a cross-partition `Scan` operation with flexible conditions and orderings: + +```properties +scalar.db.cross_partition_scan.enabled=true +scalar.db.cross_partition_scan.filtering.enabled=true +scalar.db.cross_partition_scan.ordering.enabled=true +``` + +You can call the `where()` and `ordering()` methods after calling the `all()` method to specify arbitrary conditions and orderings as follows: + +```java +// Create a `Scan` operation with arbitrary conditions and orderings. +Scan scan = + Scan.newBuilder() + .namespace("ns") + .table("tbl") + .all() + .where(ConditionBuilder.column("c1").isNotEqualToInt(10)) + .projections("c1", "c2", "c3", "c4") + .orderings(Scan.Ordering.desc("c3"), Scan.Ordering.asc("c4")) + .limit(10) + .build(); + +// Execute the `Scan` operation. +List results = transaction.scan(scan); +``` + +As an argument of the `where()` method, you can specify a condition, an and-wise condition set, or an or-wise condition set. After calling the `where()` method, you can add more conditions or condition sets by using the `and()` method or `or()` method as follows: + +```java +// Create a `Scan` operation with condition sets. +Scan scan = + Scan.newBuilder() + .namespace("ns") + .table("tbl") + .all() + .where( + ConditionSetBuilder.condition(ConditionBuilder.column("c1").isLessThanInt(10)) + .or(ConditionBuilder.column("c1").isGreaterThanInt(20)) + .build()) + .and( + ConditionSetBuilder.condition(ConditionBuilder.column("c2").isLikeText("a%")) + .or(ConditionBuilder.column("c2").isLikeText("b%")) + .build()) + .limit(10) + .build(); +``` + +{% capture notice--info %} +**Note** + +In the `where()` condition method chain, the conditions must be an and-wise junction of `ConditionalExpression` or `OrConditionSet` (known as conjunctive normal form) like the above example or an or-wise junction of `ConditionalExpression` or `AndConditionSet` (known as disjunctive normal form). +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
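+
+The note above also allows the or-wise (disjunctive normal form) shape. The following is a sketch of what that might look like, assuming the same `ns.tbl` table and columns used in the earlier examples:
+
+```java
+// A disjunctive-normal-form example: (c1 < 10 AND c2 LIKE 'a%') OR (c1 > 20 AND c2 LIKE 'b%').
+Scan scan =
+    Scan.newBuilder()
+        .namespace("ns")
+        .table("tbl")
+        .all()
+        .where(
+            ConditionSetBuilder.condition(ConditionBuilder.column("c1").isLessThanInt(10))
+                .and(ConditionBuilder.column("c2").isLikeText("a%"))
+                .build())
+        .or(
+            ConditionSetBuilder.condition(ConditionBuilder.column("c1").isGreaterThanInt(20))
+                .and(ConditionBuilder.column("c2").isLikeText("b%"))
+                .build())
+        .limit(10)
+        .build();
+```
+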
+ +For more details about available conditions and condition sets, see the `ConditionBuilder` and `ConditionSetBuilder` page in the [Javadoc](https://javadoc.io/doc/com.scalar-labs/scalardb/latest/index.html) of the version of ScalarDB that you're using. + +#### `Put` operation + +`Put` is an operation to put a record specified by a primary key. The operation behaves as an upsert operation for a record, in which the operation updates the record if the record exists or inserts the record if the record does not exist. + +{% capture notice--info %} +**Note** + +When you update an existing record, you need to read the record by using `Get` or `Scan` before using a `Put` operation. Otherwise, the operation will fail due to a conflict. This occurs because of the specification of ScalarDB to manage transactions properly. Instead of reading the record explicitly, you can enable implicit pre-read. For details, see [Enable implicit pre-read for `Put` operations](#enable-implicit-pre-read-for-put-operations). +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +You need to create a `Put` object first, and then you can execute the object by using the `transaction.put()` method as follows: + +```java +// Create a `Put` operation. +Key partitionKey = Key.ofInt("c1", 10); +Key clusteringKey = Key.of("c2", "aaa", "c3", 100L); + +Put put = + Put.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .floatValue("c4", 1.23F) + .doubleValue("c5", 4.56) + .build(); + +// Execute the `Put` operation. +transaction.put(put); +``` + +You can also put a record with `null` values as follows: + +```java +Put put = + Put.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .floatValue("c4", null) + .doubleValue("c5", null) + .build(); +``` + +##### Enable implicit pre-read for `Put` operations + +In Consensus Commit, an application must read a record before mutating the record with `Put` and `Delete` operations to obtain the latest states of the record if the record exists. Instead of reading the record explicitly, you can enable *implicit pre-read*. By enabling implicit pre-read, if an application does not read the record explicitly in a transaction, ScalarDB will read the record on behalf of the application before committing the transaction. + +You can enable implicit pre-read for a `Put` operation by specifying `enableImplicitPreRead()` in the `Put` operation builder as follows: + +```java +Put put = + Put.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .floatValue("c4", 1.23F) + .doubleValue("c5", 4.56) + .enableImplicitPreRead() + .build(); +``` + +{% capture notice--info %} +**Note** + +If you are certain that a record you are trying to mutate does not exist, you should not enable implicit pre-read for the `Put` operation for better performance. For example, if you load initial data, you should not enable implicit pre-read. A `Put` operation without implicit pre-read is faster than `Put` operation with implicit pre-read because the operation skips an unnecessary read. +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +#### `Delete` operation + +`Delete` is an operation to delete a record specified by a primary key. + +{% capture notice--info %} +**Note** + +When you delete a record, you don't have to read the record beforehand because implicit pre-read is always enabled for `Delete` operations. +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +You need to create a `Delete` object first, and then you can execute the object by using the `transaction.delete()` method as follows: + +```java +// Create a `Delete` operation. +Key partitionKey = Key.ofInt("c1", 10); +Key clusteringKey = Key.of("c2", "aaa", "c3", 100L); + +Delete delete = + Delete.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .build(); + +// Execute the `Delete` operation. +transaction.delete(delete); +``` + +#### `Put` and `Delete` with a condition + +You can write arbitrary conditions (for example, a bank account balance must be equal to or more than zero) that you require a transaction to meet before being committed by implementing logic that checks the conditions in the transaction. Alternatively, you can write simple conditions in a mutation operation, such as `Put` and `Delete`. + +When a `Put` or `Delete` operation includes a condition, the operation is executed only if the specified condition is met. If the condition is not met when the operation is executed, an exception called `UnsatisfiedConditionException` will be thrown. + +{% capture notice--info %} +**Note** + +When you specify a condition in a `Put` operation, you need to read the record beforehand or enable implicit pre-read. +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
+ + +##### Conditions for `Put` + +You can specify a condition in a `Put` operation as follows: + +```java +// Build a condition. +MutationCondition condition = + ConditionBuilder.putIf(ConditionBuilder.column("c4").isEqualToFloat(0.0F)) + .and(ConditionBuilder.column("c5").isEqualToDouble(0.0)) + .build(); + +Put put = + Put.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .floatValue("c4", 1.23F) + .doubleValue("c5", 4.56) + .condition(condition) // condition + .build(); + +// Execute the `Put` operation. +transaction.put(put); +``` + +In addition to using the `putIf` condition, you can specify the `putIfExists` and `putIfNotExists` conditions as follows: + +```java +// Build a `putIfExists` condition. +MutationCondition putIfExistsCondition = ConditionBuilder.putIfExists(); + +// Build a `putIfNotExists` condition. +MutationCondition putIfNotExistsCondition = ConditionBuilder.putIfNotExists(); +``` + +##### Conditions for `Delete` + +You can specify a condition in a `Delete` operation as follows: + +```java +// Build a condition. +MutationCondition condition = + ConditionBuilder.deleteIf(ConditionBuilder.column("c4").isEqualToFloat(0.0F)) + .and(ConditionBuilder.column("c5").isEqualToDouble(0.0)) + .build(); + +Delete delete = + Delete.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .condition(condition) // condition + .build(); + +// Execute the `Delete` operation. +transaction.delete(delete); +``` + +In addition to using the `deleteIf` condition, you can specify the `deleteIfExists` condition as follows: + +```java +// Build a `deleteIfExists` condition. +MutationCondition deleteIfExistsCondition = ConditionBuilder.deleteIfExists(); +``` + +#### Mutate operation + +Mutate is an operation to execute multiple mutations (`Put` and `Delete` operations). + +You need to create mutation objects first, and then you can execute the objects by using the `transaction.mutate()` method as follows: + +```java +// Create `Put` and `Delete` operations. +Key partitionKey = Key.ofInt("c1", 10); + +Key clusteringKeyForPut = Key.of("c2", "aaa", "c3", 100L); + +Put put = + Put.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(partitionKey) + .clusteringKey(clusteringKeyForPut) + .floatValue("c4", 1.23F) + .doubleValue("c5", 4.56) + .build(); + +Key clusteringKeyForDelete = Key.of("c2", "bbb", "c3", 200L); + +Delete delete = + Delete.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(partitionKey) + .clusteringKey(clusteringKeyForDelete) + .build(); + +// Execute the operations. +transaction.mutate(Arrays.asList(put, delete)); +``` + +#### Default namespace for CRUD operations + +A default namespace for all CRUD operations can be set by using a property in the ScalarDB configuration. + +```properties +scalar.db.default_namespace_name= +``` + +Any operation that does not specify a namespace will use the default namespace set in the configuration. + +```java +// This operation will target the default namespace. +Scan scanUsingDefaultNamespace = + Scan.newBuilder() + .table("tbl") + .all() + .build(); +// This operation will target the "ns" namespace. +Scan scanUsingSpecifiedNamespace = + Scan.newBuilder() + .namespace("ns") + .table("tbl") + .all() + .build(); +``` + +### Commit a transaction + +After executing CRUD operations, you need to commit a transaction to finish it. + +You can commit a transaction as follows: + +```java +// Commit a transaction. 
+transaction.commit(); +``` + +### Roll back or abort a transaction + +If an error occurs when executing a transaction, you can roll back or abort the transaction. + +You can roll back a transaction as follows: + +```java +// Roll back a transaction. +transaction.rollback(); +``` + +Or, you can abort a transaction as follows: + +```java +// Abort a transaction. +transaction.abort(); +``` + +For details about how to handle exceptions in ScalarDB, see [How to handle exceptions](#how-to-handle-exceptions). + +## How to handle exceptions + +When executing a transaction, you will also need to handle exceptions properly. + +{% capture notice--warning %} +**Attention** + +If you don't handle exceptions properly, you may face anomalies or data inconsistency. +{% endcapture %} + +
<div class="notice--warning">{{ notice--warning | markdownify }}</div>
+ +The following sample code shows how to handle exceptions: + +```java +public class Sample { + public static void main(String[] args) throws Exception { + TransactionFactory factory = TransactionFactory.create(""); + DistributedTransactionManager transactionManager = factory.getTransactionManager(); + + int retryCount = 0; + TransactionException lastException = null; + + while (true) { + if (retryCount++ > 0) { + // Retry the transaction three times maximum. + if (retryCount >= 3) { + // Throw the last exception if the number of retries exceeds the maximum. + throw lastException; + } + + // Sleep 100 milliseconds before retrying the transaction. + TimeUnit.MILLISECONDS.sleep(100); + } + + DistributedTransaction transaction = null; + try { + // Begin a transaction. + transaction = transactionManager.begin(); + + // Execute CRUD operations in the transaction. + Optional result = transaction.get(...); + List results = transaction.scan(...); + transaction.put(...); + transaction.delete(...); + + // Commit the transaction. + transaction.commit(); + } catch (UnsatisfiedConditionException e) { + // You need to handle `UnsatisfiedConditionException` only if a mutation operation specifies a condition. + // This exception indicates the condition for the mutation operation is not met. + + try { + transaction.rollback(); + } catch (RollbackException ex) { + // Rolling back the transaction failed. Since the transaction should eventually recover, + // you don't need to do anything further. You can simply log the occurrence here. + } + + // You can handle the exception here, according to your application requirements. + + return; + } catch (UnknownTransactionStatusException e) { + // If you catch `UnknownTransactionStatusException` when committing the transaction, + // it indicates that the status of the transaction, whether it was successful or not, is unknown. + // In such a case, you need to check if the transaction is committed successfully or not and + // retry the transaction if it failed. How to identify a transaction status is delegated to users. + return; + } catch (TransactionException e) { + // For other exceptions, you can try retrying the transaction. + + // For `CrudConflictException`, `CommitConflictException`, and `TransactionNotFoundException`, + // you can basically retry the transaction. However, for the other exceptions, the transaction + // will still fail if the cause of the exception is non-transient. In such a case, you will + // exhaust the number of retries and throw the last exception. + + if (transaction != null) { + try { + transaction.rollback(); + } catch (RollbackException ex) { + // Rolling back the transaction failed. The transaction should eventually recover, + // so you don't need to do anything further. You can simply log the occurrence here. + } + } + + lastException = e; + } + } + } +} +``` + +### `TransactionException` and `TransactionNotFoundException` + +The `begin()` API could throw `TransactionException` or `TransactionNotFoundException`: + +- If you catch `TransactionException`, this exception indicates that the transaction has failed to begin due to transient or non-transient faults. You can try retrying the transaction, but you may not be able to begin the transaction due to non-transient faults. +- If you catch `TransactionNotFoundException`, this exception indicates that the transaction has failed to begin due to transient faults. In this case, you can retry the transaction. + +The `join()` API could also throw `TransactionNotFoundException`. 
You can handle this exception in the same way that you handle the exceptions for the `begin()` API. + +### `CrudException` and `CrudConflictException` + +The APIs for CRUD operations (`get()`, `scan()`, `put()`, `delete()`, and `mutate()`) could throw `CrudException` or `CrudConflictException`: + +- If you catch `CrudException`, this exception indicates that the transaction CRUD operation has failed due to transient or non-transient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is non-transient. +- If you catch `CrudConflictException`, this exception indicates that the transaction CRUD operation has failed due to transient faults (for example, a conflict error). In this case, you can retry the transaction from the beginning. + +### `UnsatisfiedConditionException` + +The APIs for mutation operations (`put()`, `delete()`, and `mutate()`) could also throw `UnsatisfiedConditionException`. + +If you catch `UnsatisfiedConditionException`, this exception indicates that the condition for the mutation operation is not met. You can handle this exception according to your application requirements. + +### `CommitException`, `CommitConflictException`, and `UnknownTransactionStatusException` + +The `commit()` API could throw `CommitException`, `CommitConflictException`, or `UnknownTransactionStatusException`: + +- If you catch `CommitException`, this exception indicates that committing the transaction fails due to transient or non-transient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is non-transient. +- If you catch `CommitConflictException`, this exception indicates that committing the transaction has failed due to transient faults (for example, a conflict error). In this case, you can retry the transaction from the beginning. +- If you catch `UnknownTransactionStatusException`, this exception indicates that the status of the transaction, whether it was successful or not, is unknown. In this case, you need to check if the transaction is committed successfully and retry the transaction if it has failed. + +How to identify a transaction status is delegated to users. You may want to create a transaction status table and update it transactionally with other application data so that you can get the status of a transaction from the status table. + +### Notes about some exceptions + +Although not illustrated in the sample code, the `resume()` API could also throw `TransactionNotFoundException`. This exception indicates that the transaction associated with the specified ID was not found and/or the transaction might have expired. In either case, you can retry the transaction from the beginning since the cause of this exception is basically transient. + +In the sample code, for `UnknownTransactionStatusException`, the transaction is not retried because the application must check if the transaction was successful to avoid potential duplicate operations. For other exceptions, the transaction is retried because the cause of the exception is transient or non-transient. If the cause of the exception is transient, the transaction may succeed if you retry it. However, if the cause of the exception is non-transient, the transaction will still fail even if you retry it. In such a case, you will exhaust the number of retries. + +{% capture notice--info %} +**Note** + +In the sample code, the transaction is retried three times maximum and sleeps for 100 milliseconds before it is retried. 
But you can choose a retry policy, such as exponential backoff, according to your application requirements. +{% endcapture %} + +
{{ notice--info | markdownify }}
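The following is a minimal, hand-written Java sketch of such a retry loop; it is not the sample code referenced above. It retries on transaction exceptions with exponential backoff, gives up after a fixed number of attempts, and deliberately does not retry `UnknownTransactionStatusException` because the commit outcome must be checked first. The `runOperations()` helper is a placeholder for your own CRUD logic.

```java
import com.scalar.db.api.DistributedTransaction;
import com.scalar.db.api.DistributedTransactionManager;
import com.scalar.db.exception.transaction.TransactionException;
import com.scalar.db.exception.transaction.UnknownTransactionStatusException;

public class RetryExample {
  private static final int MAX_RETRIES = 3;
  private static final long INITIAL_BACKOFF_MILLIS = 100;

  public static void executeWithRetries(DistributedTransactionManager manager) throws Exception {
    long backoff = INITIAL_BACKOFF_MILLIS;
    for (int attempt = 0; ; attempt++) {
      DistributedTransaction tx = manager.begin();
      try {
        runOperations(tx); // your Get/Scan/Put/Delete calls
        tx.commit();
        return; // the transaction was committed successfully
      } catch (UnknownTransactionStatusException e) {
        // Don't retry. The application must check whether the transaction was
        // actually committed before deciding what to do next.
        throw e;
      } catch (TransactionException e) {
        tx.abort();
        if (attempt >= MAX_RETRIES) {
          throw e; // give up; the cause may be non-transient
        }
        Thread.sleep(backoff);
        backoff *= 2; // exponential backoff before the next attempt
      }
    }
  }

  // Placeholder for application-specific CRUD operations.
  private static void runOperations(DistributedTransaction tx) throws TransactionException {}
}
```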
+ +## Investigating Consensus Commit transaction manager errors + +To investigate errors when using the Consensus Commit transaction manager, you can enable a configuration that will return table metadata augmented with transaction metadata columns, which can be helpful when investigating transaction-related issues. This configuration, which is only available when troubleshooting the Consensus Commit transaction manager, enables you to see transaction metadata column details for a given table by using the `DistributedTransactionAdmin.getTableMetadata()` method. + +By adding the following configuration, `Get` and `Scan` operations results will contain [transaction metadata](schema-loader.md#internal-metadata-for-consensus-commit): + +```properties +# By default, this configuration is set to `false`. +scalar.db.consensus_commit.include_metadata.enabled=true +``` diff --git a/docs/3.12/backup-restore.md b/docs/3.12/backup-restore.md new file mode 100644 index 00000000..e194bcb4 --- /dev/null +++ b/docs/3.12/backup-restore.md @@ -0,0 +1,208 @@ +# How to Back Up and Restore Databases Used Through ScalarDB + +Since ScalarDB provides transaction capabilities on top of non-transactional or transactional databases non-invasively, you need to take special care to back up and restore the databases in a transactionally consistent way. + +This guide describes how to back up and restore the databases that ScalarDB supports. + +## Create a backup + +How you create a backup depends on which database you're using and whether or not you're using multiple databases. The following decision tree shows which approach you should take. + +```mermaid +flowchart TD + A[Are you using a single database with ScalarDB?] + A -->|Yes| B[Does the database have transaction support?] + B -->|Yes| C[Perform back up without explicit pausing] + B ---->|No| D[Perform back up with explicit pausing] + A ---->|No| D +``` + +### Back up without explicit pausing + +If you're using ScalarDB with a single database with support for transactions, you can create a backup of the database even while ScalarDB continues to accept transactions. + +{% capture notice--warning %} +**Attention** + +Before creating a backup, you should consider the safest way to create a transactionally consistent backup of your databases and understand any risks that are associated with the backup process. +{% endcapture %} + +
{{ notice--warning | markdownify }}
+ +One requirement for creating a backup in ScalarDB is that backups for all the ScalarDB-managed tables (including the Coordinator table) need to be transactionally consistent or automatically recoverable to a transactionally consistent state. That means that you need to create a consistent backup by dumping all tables in a single transaction. + +How you create a transactionally consistent backup depends on the type of database that you're using. Select a database to see how to create a transactionally consistent backup for ScalarDB. + +{% capture notice--info %} +**Note** + +The backup methods by database listed below are just examples of some of the databases that ScalarDB supports. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +
+
+ + + + +
+ +
+ +You can restore to any point within the backup retention period by using the automated backup feature. +
+
+ +Use the `mysqldump` command with the `--single-transaction` option. +
+
+ +Use the `pg_dump` command. +
+
+ +Use the `.backup` command with the `.timeout` command as specified in [Special commands to sqlite3 (dot-commands)](https://www.sqlite.org/cli.html#special_commands_to_sqlite3_dot_commands_) + +For an example, see [BASH: SQLite3 .backup command](https://stackoverflow.com/questions/23164445/bash-sqlite3-backup-command). +
+
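As a concrete illustration of the MySQL case above, a transactionally consistent dump might look like the following sketch. The database names and credentials are placeholders: include every ScalarDB-managed database in a single run, including the Coordinator database (named `coordinator` by default).

```shell
# Dump all ScalarDB-managed databases in a single transaction (names are examples).
$ mysqldump --single-transaction --databases <YOUR_SCALARDB_DATABASE> coordinator -u <USERNAME> -p > scalardb-backup.sql
```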
+ +### Back up with explicit pausing + +Another way to create a transactionally consistent backup is to create a backup while a cluster of ScalarDB instances does not have any outstanding transactions. Creating the backup depends on the following: + +- If the underlying database has a point-in-time snapshot or backup feature, you can create a backup during the period when no outstanding transactions exist. +- If the underlying database has a point-in-time restore or recovery (PITR) feature, you can set a restore point to a time (preferably the mid-time) in the pause duration period when no outstanding transactions exist. + +{% capture notice--info %} +**Note** + +When using a PITR feature, you should minimize the clock drifts between clients and servers by using clock synchronization, such as NTP. Otherwise, the time you get as the paused duration might be too different from the time in which the pause was actually conducted, which could restore the backup to a point where ongoing transactions exist. + +In addition, you should pause for a sufficient amount of time (for example, five seconds) and use the mid-time of the paused duration as a restore point since clock synchronization cannot perfectly synchronize clocks between nodes. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +To make ScalarDB drain outstanding requests and stop accepting new requests so that a pause duration can be created, you should implement the [Scalar Admin](https://github.com/scalar-labs/scalar-admin) interface properly in your application that uses ScalarDB or use [ScalarDB Cluster (redirects to the Enterprise docs site)](https://scalardb.scalar-labs.com/docs/latest/scalardb-cluster/), which implements the Scalar Admin interface. + +By using the [Scalar Admin client tool](https://github.com/scalar-labs/scalar-admin/tree/main/java#scalar-admin-client-tool), you can pause nodes, servers, or applications that implement the Scalar Admin interface without losing ongoing transactions. + +How you create a transactionally consistent backup depends on the type of database that you're using. Select a database to see how to create a transactionally consistent backup for ScalarDB. + +{% capture notice--info %} +**Note** + +The backup methods by database listed below are just examples of some of the databases that ScalarDB supports. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +
+
+ + + +
+ +
+ +Cassandra has a built-in replication feature, so you do not always have to create a transactionally consistent backup. For example, if the replication factor is set to `3` and only the data of one of the nodes in a Cassandra cluster is lost, you won't need a transactionally consistent backup (snapshot) because the node can be recovered by using a normal, transactionally inconsistent backup (snapshot) and the repair feature. + +However, if the quorum of cluster nodes loses their data, you will need a transactionally consistent backup (snapshot) to restore the cluster to a certain transactionally consistent point. + +To create a transactionally consistent cluster-wide backup (snapshot), pause the application that is using ScalarDB or [ScalarDB Cluster (redirects to the Enterprise docs site)](https://scalardb.scalar-labs.com/docs/latest/scalardb-cluster/) and create backups (snapshots) of the nodes as described in [Back up with explicit pausing](#back-up-with-explicit-pausing) or stop the Cassandra cluster, take copies of all the data in the nodes, and start the cluster. +
+
+ +You must create a Cosmos DB for NoSQL account with a continuous backup policy that has the PITR feature enabled. After enabling the feature, backups are created continuously. + +To specify a transactionally consistent restore point, pause your application that is using ScalarDB with Cosmos DB for NoSQL as described in [Back up with explicit pausing](#back-up-with-explicit-pausing). +
+
+ +You must enable the PITR feature for DynamoDB tables. If you're using [ScalarDB Schema Loader](schema-loader.md) to create schemas, the tool enables the PITR feature for tables by default. + +To specify a transactionally consistent restore point, pause your application that is using ScalarDB with DynamoDB as described in [Back up with explicit pausing](#back-up-with-explicit-pausing). +
+
+ +## Restore a backup + +How you restore a transactionally consistent backup depends on the type of database that you're using. Select a database to see how to restore a transactionally consistent backup for ScalarDB. + +{% capture notice--info %} +**Note** + +The restore methods by database listed below are just examples of some of the databases that ScalarDB supports. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +
+
+ + + + + + + +
+ +
+ +You can restore to any point within the backup retention period by using the automated backup feature. +
+
+ +First, stop all the nodes of the Cassandra cluster. Then, clean the `data`, `commitlog`, and `hints` directories, and place the backups (snapshots) in each node. + +After placing the backups (snapshots) in each node, start all the nodes of the Cassandra Cluster. +
+
+ +Follow the official Azure documentation for [restoring an account by using the Azure portal](https://docs.microsoft.com/en-us/azure/cosmos-db/restore-account-continuous-backup#restore-account-portal). After restoring a backup, [configure the default consistency level](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/how-to-manage-consistency#configure-the-default-consistency-level) of the restored databases to `STRONG`. In addition, you should use the mid-time of the paused duration as the restore point, as previously explained. + +ScalarDB implements the Cosmos DB adapter by using its stored procedures, which are installed when creating schemas by using ScalarDB Schema Loader. However, the PITR feature of Cosmos DB doesn't restore stored procedures. Because of this, you need to re-install the required stored procedures for all tables after restoration. You can do this by using ScalarDB Schema Loader with the `--repair-all` option. For details, see [Repair tables](schema-loader.md#repair-tables). +
+
+ +Follow the official AWS documentation for [restoring a DynamoDB table to a point in time](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/PointInTimeRecovery.Tutorial.html), but keep in mind that a table can only be restored with an alias. Because of this, you will need to restore the table with an alias, delete the original table, and rename the alias to the original name to restore the tables with the same name. + +To do this procedure: + +1. Create a backup. + 1. Select the mid-time of the paused duration as the restore point. + 2. Restore by using the PITR of table A to table B. + 3. Create a backup of the restored table B (assuming that the backup is named backup B). + 4. Remove table B. +2. Restore the backup. + 1. Remove table A. + 2. Create a table named A by using backup B. + +{% capture notice--info %} +**Note** + +* You must do the steps mentioned above for each table because tables can only be restored one at a time. +* Configurations such as PITR and auto-scaling policies are reset to the default values for restored tables, so you must manually configure the required settings. For details, see the official AWS documentation for [How to restore DynamoDB tables with DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/CreateBackup.html#CreateBackup_HowItWorks-restore). +{% endcapture %} + +
{{ notice--info | markdownify }}
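The following is a rough sketch of the steps above using the AWS CLI. The table names, backup name, and restore timestamp are placeholders, and you should verify the options against the AWS CLI reference for your version.

```shell
# 1. Restore table A to a new table B at the mid-time of the paused duration (PITR).
$ aws dynamodb restore-table-to-point-in-time \
    --source-table-name TableA \
    --target-table-name TableB \
    --restore-date-time <MID_TIME_OF_PAUSED_DURATION>

# 2. Create a backup of the restored table B (named "BackupB" here), then remove table B.
$ aws dynamodb create-backup --table-name TableB --backup-name BackupB
$ aws dynamodb delete-table --table-name TableB

# 3. Remove the original table A and re-create it from BackupB.
$ aws dynamodb delete-table --table-name TableA
$ aws dynamodb restore-table-from-backup --target-table-name TableA --backup-arn <BACKUP_ARN_OF_BACKUP_B>
```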
+ +
+
+ +If you used `mysqldump` to create the backup file, use the `mysql` command to restore the backup as specified in [Reloading SQL-Format Backups](https://dev.mysql.com/doc/mysql-backup-excerpt/8.0/en/reloading-sql-format-dumps.html). +
+
+ +If you used `pg_dump` to create the backup file, use the `psql` command to restore the backup as specified in [Restoring the Dump](https://www.postgresql.org/docs/current/backup-dump.html#BACKUP-DUMP-RESTORE). +
+
+ +Use the `.restore` command as specified in [Special commands to sqlite3 (dot-commands)](https://www.sqlite.org/cli.html#special_commands_to_sqlite3_dot_commands_). +
+
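For example, restoring the dumps from the MySQL and PostgreSQL cases above might look like the following sketch, where the file names, database names, and credentials are placeholders.

```shell
# Restore a backup file created with mysqldump.
$ mysql -u <USERNAME> -p <DATABASE_NAME> < scalardb-backup.sql

# Restore a backup file created with pg_dump.
$ psql -U <USERNAME> -d <DATABASE_NAME> -f scalardb-backup.sql
```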
diff --git a/docs/3.12/configurations.md b/docs/3.12/configurations.md new file mode 100644 index 00000000..696b0c79 --- /dev/null +++ b/docs/3.12/configurations.md @@ -0,0 +1,276 @@ +# ScalarDB Configurations + +This page describes the available configurations for ScalarDB. + +## ScalarDB client configurations + +ScalarDB provides its own transaction protocol called Consensus Commit. You can use the Consensus Commit protocol directly through the ScalarDB client library or through [ScalarDB Cluster (redirects to the Enterprise docs site)](https://scalardb.scalar-labs.com/docs/3.12/scalardb-cluster/), which is a component that is available only in the ScalarDB Enterprise edition. + +### Use Consensus Commit directly + +Consensus Commit is the default transaction manager type in ScalarDB. To use the Consensus Commit transaction manager, add the following to the ScalarDB properties file: + +```properties +scalar.db.transaction_manager=consensus-commit +``` + +{% capture notice--info %} +**Note** + +If you don't specify the `scalar.db.transaction_manager` property, `consensus-commit` will be the default value. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +#### Basic configurations + +The following basic configurations are available for the Consensus Commit transaction manager: + +| Name | Description | Default | +|-------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------| +| `scalar.db.transaction_manager` | `consensus-commit` should be specified. | - | +| `scalar.db.consensus_commit.isolation_level` | Isolation level used for Consensus Commit. Either `SNAPSHOT` or `SERIALIZABLE` can be specified. | `SNAPSHOT` | +| `scalar.db.consensus_commit.serializable_strategy` | Serializable strategy used for Consensus Commit. Either `EXTRA_READ` or `EXTRA_WRITE` can be specified. If `SNAPSHOT` is specified in the property `scalar.db.consensus_commit.isolation_level`, this configuration will be ignored. | `EXTRA_READ` | +| `scalar.db.consensus_commit.coordinator.namespace` | Namespace name of Coordinator tables. | `coordinator` | +| `scalar.db.consensus_commit.include_metadata.enabled` | If set to `true`, `Get` and `Scan` operations results will contain transaction metadata. To see the transaction metadata columns details for a given table, you can use the `DistributedTransactionAdmin.getTableMetadata()` method, which will return the table metadata augmented with the transaction metadata columns. Using this configuration can be useful to investigate transaction-related issues. | `false` | + +#### Performance-related configurations + +The following performance-related configurations are available for the Consensus Commit transaction manager: + +| Name | Description | Default | +|-----------------------------------------------------------------|--------------------------------------------------------------------------------|-------------------------------------------------------------------| +| `scalar.db.consensus_commit.parallel_executor_count` | Number of executors (threads) for parallel execution. | `128` | +| `scalar.db.consensus_commit.parallel_preparation.enabled` | Whether or not the preparation phase is executed in parallel. | `true` | +| `scalar.db.consensus_commit.parallel_validation.enabled` | Whether or not the validation phase (in `EXTRA_READ`) is executed in parallel. | The value of `scalar.db.consensus_commit.parallel_commit.enabled` | +| `scalar.db.consensus_commit.parallel_commit.enabled` | Whether or not the commit phase is executed in parallel. | `true` | +| `scalar.db.consensus_commit.parallel_rollback.enabled` | Whether or not the rollback phase is executed in parallel. | The value of `scalar.db.consensus_commit.parallel_commit.enabled` | +| `scalar.db.consensus_commit.async_commit.enabled` | Whether or not the commit phase is executed asynchronously. | `false` | +| `scalar.db.consensus_commit.async_rollback.enabled` | Whether or not the rollback phase is executed asynchronously. | The value of `scalar.db.consensus_commit.async_commit.enabled` | +| `scalar.db.consensus_commit.parallel_implicit_pre_read.enabled` | Whether or not implicit pre-read is executed in parallel. 
| `true` | + +#### Underlying storage or database configurations + +Consensus Commit has a storage abstraction layer and supports multiple underlying storages. You can specify the storage implementation by using the `scalar.db.storage` property. + +Select a database to see the configurations available for each storage. + +
+
+ + + + +
+ +
+ +The following configurations are available for Cassandra: + +| Name | Description | Default | +|-----------------------------------------|-----------------------------------------------------------------------|------------| +| `scalar.db.storage` | `cassandra` must be specified. | - | +| `scalar.db.contact_points` | Comma-separated contact points. | | +| `scalar.db.contact_port` | Port number for all the contact points. | | +| `scalar.db.username` | Username to access the database. | | +| `scalar.db.password` | Password to access the database. | | +| `scalar.db.cassandra.metadata.keyspace` | Keyspace name for the namespace and table metadata used for ScalarDB. | `scalardb` | + +
+
+ +The following configurations are available for CosmosDB for NoSQL: + +| Name | Description | Default | +|--------------------------------------------|----------------------------------------------------------------------------------------------------------|------------| +| `scalar.db.storage` | `cosmos` must be specified. | - | +| `scalar.db.contact_points` | Azure Cosmos DB for NoSQL endpoint with which ScalarDB should communicate. | | +| `scalar.db.password` | Either a master or read-only key used to perform authentication for accessing Azure Cosmos DB for NoSQL. | | +| `scalar.db.cosmos.table_metadata.database` | Database name for the table metadata used for ScalarDB. | `scalardb` | +| `scalar.db.cosmos.consistency_level` | Consistency level used for Cosmos DB operations. `STRONG` or `BOUNDED_STALENESS` can be specified. | `STRONG` | + +
+
+ +The following configurations are available for DynamoDB: + +| Name | Description | Default | +|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------| +| `scalar.db.storage` | `dynamo` must be specified. | - | +| `scalar.db.contact_points` | AWS region with which ScalarDB should communicate (e.g., `us-east-1`). | | +| `scalar.db.username` | AWS access key used to identify the user interacting with AWS. | | +| `scalar.db.password` | AWS secret access key used to authenticate the user interacting with AWS. | | +| `scalar.db.dynamo.endpoint_override` | Amazon DynamoDB endpoint with which ScalarDB should communicate. This is primarily used for testing with a local instance instead of an AWS service. | | +| `scalar.db.dynamo.table_metadata.namespace` | Namespace name for the table metadata used for ScalarDB. | `scalardb` | +| `scalar.db.dynamo.namespace.prefix` | Prefix for the user namespaces and metadata namespace names. Since AWS requires having unique tables names in a single AWS region, this is useful if you want to use multiple ScalarDB environments (development, production, etc.) in a single AWS region. | | + +
+
+ +The following configurations are available for JDBC databases: + +| Name | Description | Default | +|-----------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------| +| `scalar.db.storage` | `jdbc` must be specified. | - | +| `scalar.db.contact_points` | JDBC connection URL. | | +| `scalar.db.username` | Username to access the database. | | +| `scalar.db.password` | Password to access the database. | | +| `scalar.db.jdbc.connection_pool.min_idle` | Minimum number of idle connections in the connection pool. | `20` | +| `scalar.db.jdbc.connection_pool.max_idle` | Maximum number of connections that can remain idle in the connection pool. | `50` | +| `scalar.db.jdbc.connection_pool.max_total` | Maximum total number of idle and borrowed connections that can be active at the same time for the connection pool. Use a negative value for no limit. | `100` | +| `scalar.db.jdbc.prepared_statements_pool.enabled` | Setting this property to `true` enables prepared-statement pooling. | `false` | +| `scalar.db.jdbc.prepared_statements_pool.max_open` | Maximum number of open statements that can be allocated from the statement pool at the same time. Use a negative value for no limit. | `-1` | +| `scalar.db.jdbc.isolation_level` | Isolation level for JDBC. `READ_UNCOMMITTED`, `READ_COMMITTED`, `REPEATABLE_READ`, or `SERIALIZABLE` can be specified. | Underlying-database specific | +| `scalar.db.jdbc.table_metadata.schema` | Schema name for the table metadata used for ScalarDB. | `scalardb` | +| `scalar.db.jdbc.table_metadata.connection_pool.min_idle` | Minimum number of idle connections in the connection pool for the table metadata. | `5` | +| `scalar.db.jdbc.table_metadata.connection_pool.max_idle` | Maximum number of connections that can remain idle in the connection pool for the table metadata. | `10` | +| `scalar.db.jdbc.table_metadata.connection_pool.max_total` | Maximum total number of idle and borrowed connections that can be active at the same time for the connection pool for the table metadata. Use a negative value for no limit. | `25` | +| `scalar.db.jdbc.admin.connection_pool.min_idle` | Minimum number of idle connections in the connection pool for admin. | `5` | +| `scalar.db.jdbc.admin.connection_pool.max_idle` | Maximum number of connections that can remain idle in the connection pool for admin. | `10` | +| `scalar.db.jdbc.admin.connection_pool.max_total` | Maximum total number of idle and borrowed connections that can be active at the same time for the connection pool for admin. Use a negative value for no limit. | `25` | + +{% capture notice--info %} +**Note** + +If you use SQLite3 as a JDBC database, you must set `scalar.db.contact_points` as follows. + +```properties +scalar.db.contact_points=jdbc:sqlite:.sqlite3?busy_timeout=10000 +``` + +Unlike other JDBC databases, [SQLite3 does not fully support concurrent access](https://www.sqlite.org/lang_transaction.html). +To avoid frequent errors caused internally by [`SQLITE_BUSY`](https://www.sqlite.org/rescode.html#busy), we recommend setting a [`busy_timeout`](https://www.sqlite.org/c3ref/busy_timeout.html) parameter. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +
+
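For example, a minimal JDBC storage configuration might look like the following sketch. The connection URL and credentials are placeholders, and this example assumes a local PostgreSQL instance.

```properties
# Use the JDBC storage implementation.
scalar.db.storage=jdbc

# JDBC connection URL, username, and password for your database.
scalar.db.contact_points=jdbc:postgresql://localhost:5432/<YOUR_DATABASE>
scalar.db.username=<YOUR_USERNAME>
scalar.db.password=<YOUR_PASSWORD>
```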
+ +##### Multi-storage support + +ScalarDB supports using multiple storage implementations simultaneously. You can use multiple storages by specifying `multi-storage` as the value for the `scalar.db.storage` property. + +For details about using multiple storages, see [Multi-Storage Transactions](multi-storage-transactions.md). + +### Use Consensus Commit through ScalarDB Cluster + +[ScalarDB Cluster (redirects to the Enterprise docs site)](https://scalardb.scalar-labs.com/docs/3.12/scalardb-cluster/) is a component that provides a gRPC interface to ScalarDB. + +For details about client configurations, see the ScalarDB Cluster [client configurations (redirects to the Enterprise docs site)](https://scalardb.scalar-labs.com/docs/3.12/scalardb-cluster/developer-guide-for-scalardb-cluster-with-java-api/#client-configurations). + +## Cross-partition scan configurations + +By enabling the cross-partition scan option below, the `Scan` operation can retrieve all records across partitions. In addition, you can specify arbitrary conditions and orderings in the cross-partition `Scan` operation by enabling `cross_partition_scan.filtering` and `cross_partition_scan.ordering`, respectively. Currently, cross-partition scan with filtering and ordering is available only for JDBC databases. To enable filtering and ordering, `scalar.db.cross_partition_scan.enabled` must be set to `true`. + +For details on how to use cross-partition scan, see [Scan operation](./api-guide.md#scan-operation). + +{% capture notice--warning %} +**Attention** + +For non-JDBC databases, we do not recommend enabling cross-partition scan with the `SERIALIZABLE` isolation level because transactions could be executed at a lower isolation level (that is, `SNAPSHOT`). When using non-JDBC databases, use cross-partition scan at your own risk only if consistency does not matter for your transactions. +{% endcapture %} + +
{{ notice--warning | markdownify }}
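For example, to enable cross-partition scan together with filtering and ordering on a JDBC database, you can set the properties listed in the following table as shown below:

```properties
scalar.db.cross_partition_scan.enabled=true
scalar.db.cross_partition_scan.filtering.enabled=true
scalar.db.cross_partition_scan.ordering.enabled=true
```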
+ +| Name | Description | Default | +|----------------------------------------------------|-----------------------------------------------|---------| +| `scalar.db.cross_partition_scan.enabled` | Enable cross-partition scan. | `false` | +| `scalar.db.cross_partition_scan.filtering.enabled` | Enable filtering in cross-partition scan. | `false` | +| `scalar.db.cross_partition_scan.ordering.enabled` | Enable ordering in cross-partition scan. | `false` | + +## Other ScalarDB configurations + +The following are additional configurations available for ScalarDB: + +| Name | Description | Default | +|------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------| +| `scalar.db.metadata.cache_expiration_time_secs` | ScalarDB has a metadata cache to reduce the number of requests to the database. This setting specifies the expiration time of the cache in seconds. | `-1` (no expiration) | +| `scalar.db.active_transaction_management.expiration_time_millis` | ScalarDB maintains ongoing transactions, which can be resumed by using a transaction ID. This setting specifies the expiration time of this transaction management feature in milliseconds. | `-1` (no expiration) | +| `scalar.db.default_namespace_name` | The given namespace name will be used by operations that do not already specify a namespace. | | + +## Placeholder usage + +You can use placeholders in the values, and they are replaced with environment variables (`${env:}`) or system properties (`${sys:}`). You can also specify default values in placeholders like `${sys::-}`. + +The following is an example of a configuration that uses placeholders: + +```properties +scalar.db.username=${env::-admin} +scalar.db.password=${env:} +``` + +In this example configuration, ScalarDB reads the username and password from environment variables. If the environment variable `SCALAR_DB_USERNAME` does not exist, ScalarDB uses the default value `admin`. + +## Configuration examples + +This section provides some configuration examples. + +### Configuration example #1 - App and database + +```mermaid +flowchart LR + app["App
(ScalarDB library with
Consensus Commit)"] + db[(Underlying storage or database)] + app --> db +``` + +In this example configuration, the app (ScalarDB library with Consensus Commit) connects to an underlying storage or database (in this case, Cassandra) directly. + +{% capture notice--warning %} +**Attention** + +This configuration exists only for development purposes and isn’t suitable for a production environment. This is because the app needs to implement the [Scalar Admin](https://github.com/scalar-labs/scalar-admin) interface to take transactionally consistent backups for ScalarDB, which requires additional configurations. +{% endcapture %} + +
{{ notice--warning | markdownify }}
+ +The following is an example of the configuration for connecting the app to the underlying database through ScalarDB: + +```properties +# Transaction manager implementation. +scalar.db.transaction_manager=consensus-commit + +# Storage implementation. +scalar.db.storage=cassandra + +# Comma-separated contact points. +scalar.db.contact_points= + +# Credential information to access the database. +scalar.db.username= +scalar.db.password= +``` + +### Configuration example #2 - App, ScalarDB Cluster, and database + +```mermaid +flowchart LR + app["App -
ScalarDB library with gRPC"] + cluster["ScalarDB Cluster -
(ScalarDB library with
Consensus Commit)"] + db[(Underlying storage or database)] + app --> cluster --> db +``` + +In this example configuration, the app (ScalarDB library with gRPC) connects to an underlying storage or database (in this case, Cassandra) through ScalarDB Cluster, which is a component that is available only in the ScalarDB Enterprise edition. + +{% capture notice--info %} +**Note** + +This configuration is acceptable for production use because ScalarDB Cluster implements the [Scalar Admin](https://github.com/scalar-labs/scalar-admin) interface, which enables you to take transactionally consistent backups for ScalarDB by pausing ScalarDB Cluster. + +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +The following is an example of the configuration for connecting the app to the underlying database through ScalarDB Cluster: + +```properties +# Transaction manager implementation. +scalar.db.transaction_manager=cluster + +# Contact point of the cluster. +scalar.db.contact_points=indirect: +``` + +For details about client configurations, see the ScalarDB Cluster [client configurations (redirects to the Enterprise docs site)](https://scalardb.scalar-labs.com/docs/3.12/scalardb-cluster/developer-guide-for-scalardb-cluster-with-java-api/#client-configurations). diff --git a/docs/3.12/design.md b/docs/3.12/design.md new file mode 100644 index 00000000..68ca06f1 --- /dev/null +++ b/docs/3.12/design.md @@ -0,0 +1,6 @@ +# ScalarDB Design Document + +For details about the design and implementation of ScalarDB, please see the following documents, which we presented at the VLDB 2023 conference: + +- **Speakerdeck presentation:** [ScalarDB: Universal Transaction Manager for Polystores](https://speakerdeck.com/scalar/scalardb-universal-transaction-manager-for-polystores-vldb23) +- **Detailed paper:** [ScalarDB: Universal Transaction Manager for Polystores](https://www.vldb.org/pvldb/vol16/p3768-yamada.pdf) diff --git a/docs/3.12/development-configurations.md b/docs/3.12/development-configurations.md new file mode 100644 index 00000000..5037e802 --- /dev/null +++ b/docs/3.12/development-configurations.md @@ -0,0 +1,11 @@ +--- +toc: false +--- + +# Configuration Guides for ScalarDB + +The following is a list of configuration guides for ScalarDB: + +- [ScalarDB Configurations](configurations.md) +- [Multi-Storage Transactions](multi-storage-transactions.md) +- [Transactions with a Two-Phase Commit Interface](two-phase-commit-transactions.md) diff --git a/docs/3.12/getting-started-kotlin/build.gradle.kts b/docs/3.12/getting-started-kotlin/build.gradle.kts new file mode 100644 index 00000000..e1722463 --- /dev/null +++ b/docs/3.12/getting-started-kotlin/build.gradle.kts @@ -0,0 +1,30 @@ +import org.jetbrains.kotlin.gradle.tasks.KotlinCompile + +plugins { + kotlin("jvm") version "1.8.21" + application +} + +group = "com.scalar-labs" +version = "1.0-SNAPSHOT" + +repositories { + mavenCentral() +} + +dependencies { + implementation("com.scalar-labs", "scalardb", "3.12.0") + testImplementation(kotlin("test")) +} + +tasks.test { + useJUnitPlatform() +} + +tasks.withType { + kotlinOptions.jvmTarget = "1.8" +} + +application { + mainClass.set("MainKt") +} diff --git a/docs/3.12/getting-started-kotlin/gradle.properties b/docs/3.12/getting-started-kotlin/gradle.properties new file mode 100644 index 00000000..7fc6f1ff --- /dev/null +++ b/docs/3.12/getting-started-kotlin/gradle.properties @@ -0,0 +1 @@ +kotlin.code.style=official diff --git a/docs/3.12/getting-started-kotlin/gradle/wrapper/gradle-wrapper.jar b/docs/3.12/getting-started-kotlin/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 00000000..033e24c4 Binary files /dev/null and b/docs/3.12/getting-started-kotlin/gradle/wrapper/gradle-wrapper.jar differ diff --git a/docs/3.12/getting-started-kotlin/gradle/wrapper/gradle-wrapper.properties b/docs/3.12/getting-started-kotlin/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000..9f4197d5 --- /dev/null +++ b/docs/3.12/getting-started-kotlin/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,7 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-8.2.1-bin.zip +networkTimeout=10000 
+validateDistributionUrl=true +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/docs/3.12/getting-started-kotlin/gradlew b/docs/3.12/getting-started-kotlin/gradlew new file mode 100755 index 00000000..fcb6fca1 --- /dev/null +++ b/docs/3.12/getting-started-kotlin/gradlew @@ -0,0 +1,248 @@ +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# +############################################################################## + +# Attempt to set APP_HOME + +# Resolve links: $0 may be a link +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit + +# Use the maximum available, or set MAX_FD != -1 to use that value. 
+MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. + +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
+DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command; +# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of +# shell script including quotes and variable substitutions, so put them in +# double quotes to make sure that they get re-expanded; and +# * put everything else in single quotes, so that it's not re-expanded. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + +exec "$JAVACMD" "$@" diff --git a/docs/3.12/getting-started-kotlin/gradlew.bat b/docs/3.12/getting-started-kotlin/gradlew.bat new file mode 100644 index 00000000..93e3f59f --- /dev/null +++ b/docs/3.12/getting-started-kotlin/gradlew.bat @@ -0,0 +1,92 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%"=="" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if %ERRORLEVEL% equ 0 goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. 
+ +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if %ERRORLEVEL% equ 0 goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/docs/3.12/getting-started-kotlin/scalardb.properties b/docs/3.12/getting-started-kotlin/scalardb.properties new file mode 100644 index 00000000..b1dbcbde --- /dev/null +++ b/docs/3.12/getting-started-kotlin/scalardb.properties @@ -0,0 +1,12 @@ +# Comma-separated contact points +scalar.db.contact_points=localhost + +# Port number for all the contact points. Default port number for each database is used if empty. +#scalar.db.contact_port= + +# Credential information to access the database +scalar.db.username=cassandra +scalar.db.password=cassandra + +# Storage implementation. Either cassandra or cosmos or dynamo or jdbc can be set. Default storage is cassandra. +#scalar.db.storage=cassandra diff --git a/docs/3.12/getting-started-kotlin/settings.gradle.kts b/docs/3.12/getting-started-kotlin/settings.gradle.kts new file mode 100644 index 00000000..897772e3 --- /dev/null +++ b/docs/3.12/getting-started-kotlin/settings.gradle.kts @@ -0,0 +1,3 @@ + +rootProject.name = "scalardb-kotlin-sample" + diff --git a/docs/3.12/getting-started-kotlin/src/main/kotlin/Main.kt b/docs/3.12/getting-started-kotlin/src/main/kotlin/Main.kt new file mode 100644 index 00000000..543fe10b --- /dev/null +++ b/docs/3.12/getting-started-kotlin/src/main/kotlin/Main.kt @@ -0,0 +1,67 @@ +import sample.ElectronicMoney +import java.io.File +import kotlin.system.exitProcess + +fun main(args: Array) { + var action: String? = null + var amount = 0 + var to: String? = null + var from: String? = null + var id: String? = null + var scalarDBProperties: String? 
= null + var i = 0 + while (i < args.size) { + if ("-action" == args[i]) { + action = args[++i] + } else if ("-amount" == args[i]) { + amount = args[++i].toInt() + } else if ("-to" == args[i]) { + to = args[++i] + } else if ("-from" == args[i]) { + from = args[++i] + } else if ("-id" == args[i]) { + id = args[++i] + } else if ("-config" == args[i]) { + scalarDBProperties = args[++i] + } else if ("-help" == args[i]) { + printUsageAndExit() + return + } + ++i + } + if (action == null) { + printUsageAndExit() + return + } + val eMoney = ElectronicMoney( + scalarDBProperties ?: (System.getProperty("user.dir") + File.separator + "scalardb.properties") + ) + if (action.equals("charge", ignoreCase = true)) { + if (to == null || amount < 0) { + printUsageAndExit() + return + } + eMoney.charge(to, amount) + } else if (action.equals("pay", ignoreCase = true)) { + if (to == null || amount < 0 || from == null) { + printUsageAndExit() + return + } + eMoney.pay(from, to, amount) + } else if (action.equals("getBalance", ignoreCase = true)) { + if (id == null) { + printUsageAndExit() + return + } + val balance = eMoney.getBalance(id) + println("The balance for $id is $balance") + } + eMoney.close() +} + +fun printUsageAndExit() { + System.err.println( + "ElectronicMoneyMain -action charge/pay/getBalance [-amount number (needed for charge and pay)] [-to id (needed for charge and pay)] [-from id (needed for pay)] [-id id (needed for getBalance)]" + ) + exitProcess(1) +} diff --git a/docs/3.12/getting-started-kotlin/src/main/kotlin/sample/ElectronicMoney.kt b/docs/3.12/getting-started-kotlin/src/main/kotlin/sample/ElectronicMoney.kt new file mode 100644 index 00000000..15cefaa5 --- /dev/null +++ b/docs/3.12/getting-started-kotlin/src/main/kotlin/sample/ElectronicMoney.kt @@ -0,0 +1,141 @@ +package sample + +import com.scalar.db.api.DistributedTransactionManager +import com.scalar.db.api.Get +import com.scalar.db.api.Put +import com.scalar.db.exception.transaction.TransactionException +import com.scalar.db.io.Key +import com.scalar.db.service.TransactionFactory + +class ElectronicMoney(scalarDBProperties: String) { + companion object { + private const val NAMESPACE = "emoney" + private const val TABLENAME = "account" + private const val ID = "id" + private const val BALANCE = "balance" + } + + private val manager: DistributedTransactionManager + + init { + val factory = TransactionFactory.create(scalarDBProperties) + manager = factory.transactionManager + } + + @Throws(TransactionException::class) + fun charge(id: String, amount: Int) { + // Start a transaction + val tx = manager.start() + try { + // Retrieve the current balance for id + val get = Get.newBuilder() + .namespace(NAMESPACE) + .table(TABLENAME) + .partitionKey(Key.ofText(ID, id)) + .build() + val result = tx.get(get) + + // Calculate the balance + var balance = amount + if (result.isPresent) { + val current = result.get().getInt(BALANCE) + balance += current + } + + // Update the balance + val put = Put.newBuilder() + .namespace(NAMESPACE) + .table(TABLENAME) + .partitionKey(Key.ofText(ID, id)) + .intValue(BALANCE, balance) + .build() + tx.put(put) + + // Commit the transaction (records are automatically recovered in case of failure) + tx.commit() + } catch (e: Exception) { + tx.abort() + throw e + } + } + + @Throws(TransactionException::class) + fun pay(fromId: String, toId: String, amount: Int) { + // Start a transaction + val tx = manager.start() + try { + // Retrieve the current balances for ids + val fromGet = Get.newBuilder() + 
.namespace(NAMESPACE) + .table(TABLENAME) + .partitionKey(Key.ofText(ID, fromId)) + .build() + val toGet = Get.newBuilder() + .namespace(NAMESPACE) + .table(TABLENAME) + .partitionKey(Key.ofText(ID, toId)) + .build() + val fromResult = tx.get(fromGet) + val toResult = tx.get(toGet) + + // Calculate the balances (it assumes that both accounts exist) + val newFromBalance = fromResult.get().getInt(BALANCE) - amount + val newToBalance = toResult.get().getInt(BALANCE) + amount + if (newFromBalance < 0) { + throw RuntimeException("$fromId doesn't have enough balance.") + } + + // Update the balances + val fromPut = Put.newBuilder() + .namespace(NAMESPACE) + .table(TABLENAME) + .partitionKey(Key.ofText(ID, fromId)) + .intValue(BALANCE, newFromBalance) + .build() + val toPut = Put.newBuilder() + .namespace(NAMESPACE) + .table(TABLENAME) + .partitionKey(Key.ofText(ID, toId)) + .intValue(BALANCE, newToBalance) + .build() + tx.put(fromPut) + tx.put(toPut) + + // Commit the transaction (records are automatically recovered in case of failure) + tx.commit() + } catch (e: Exception) { + tx.abort() + throw e + } + } + + @Throws(TransactionException::class) + fun getBalance(id: String): Int { + // Start a transaction + val tx = manager.start() + return try { + // Retrieve the current balances for id + val get = Get.newBuilder() + .namespace(NAMESPACE) + .table(TABLENAME) + .partitionKey(Key.ofText(ID, id)) + .build() + val result = tx.get(get) + var balance = -1 + if (result.isPresent) { + balance = result.get().getInt(BALANCE) + } + + // Commit the transaction + tx.commit() + balance + } catch (e: Exception) { + tx.abort() + throw e + } + } + + fun close() { + manager.close() + } +} diff --git a/docs/3.12/getting-started-with-scalardb-by-using-kotlin.md b/docs/3.12/getting-started-with-scalardb-by-using-kotlin.md new file mode 100644 index 00000000..fb08c368 --- /dev/null +++ b/docs/3.12/getting-started-with-scalardb-by-using-kotlin.md @@ -0,0 +1,316 @@ +# Getting Started with ScalarDB by Using Kotlin + +This getting started tutorial explains how to configure your preferred database in ScalarDB and set up a basic electronic money application by using Kotlin. Since Kotlin has Java interoperability, you can use ScalarDB directly from Kotlin. + +{% capture notice--warning %} +**Warning** + +The electronic money application is simplified for this tutorial and isn't suitable for a production environment. +{% endcapture %} + +
{{ notice--warning | markdownify }}
+ +## Install a JDK + +Because ScalarDB is written in Java, you must have one of the following Java Development Kits (JDKs) installed in your environment: + +- [Oracle JDK](https://www.oracle.com/java/technologies/downloads/) LTS version (8, 11, or 17) +- [OpenJDK](https://openjdk.org/install/) LTS version (8, 11, or 17) + +{% capture notice--info %} +**Note** + +We recommend using the LTS versions mentioned above, but other non-LTS versions may work. + +In addition, other JDKs should work with ScalarDB, but we haven't tested them. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +## Clone the `scalardb` repository + +Open a terminal window, and go to your working directory. Then, clone the [scalardb](https://github.com/scalar-labs/scalardb) repository by running the following command: + +```shell +$ git clone https://github.com/scalar-labs/scalardb +``` + +Then, go to the `scalardb/docs/getting-started-kotlin` directory in the cloned repository by running the following command: + +```shell +$ cd scalardb/docs/getting-started-kotlin +``` + +## Set up your database for ScalarDB + +Select your database, and follow the instructions to configure it for ScalarDB. + +For a list of databases that ScalarDB supports, see [Supported Databases](scalardb-supported-databases.md). + +
+
+ + + + +
+ +
+ +Confirm that you have Cassandra installed. If Cassandra isn't installed, visit [Downloading Cassandra](https://cassandra.apache.org/_/download.html). + +### Configure Cassandra +{:.no_toc} + +Open **cassandra.yaml** in your preferred IDE. Then, change `commitlog_sync` from `periodic` to `batch` so that you don't lose data if a quorum of replica nodes goes down. + +### Configure ScalarDB +{:.no_toc} + +The following instructions assume that you have properly installed and configured the JDK and Cassandra in your local environment, and Cassandra is running on your localhost. + +The **scalardb.properties** file in the `docs/getting-started-kotlin` directory holds database configurations for ScalarDB. The following is a basic configuration for Cassandra. Be sure to change the values for `scalar.db.username` and `scalar.db.password` as described. + +```properties +# The Cassandra storage implementation is used for Consensus Commit. +scalar.db.storage=cassandra + +# Comma-separated contact points. +scalar.db.contact_points=localhost + +# The port number for all the contact points. +scalar.db.contact_port=9042 + +# The username and password to access the database. +scalar.db.username= +scalar.db.password= +``` +
+
+ +To use Azure Cosmos DB for NoSQL, you must have an Azure account. If you don't have an Azure account, visit [Create an Azure Cosmos DB account](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/quickstart-portal#create-account). + +### Configure Cosmos DB for NoSQL +{:.no_toc} + +Set the **default consistency level** to **Strong** according to the official document at [Configure the default consistency level](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/how-to-manage-consistency#configure-the-default-consistency-level). + +### Configure ScalarDB +{:.no_toc} + +The following instructions assume that you have properly installed and configured the JDK in your local environment and properly configured your Cosmos DB for NoSQL account in Azure. + +The **scalardb.properties** file in the `docs/getting-started-kotlin` directory holds database configurations for ScalarDB. Be sure to change the values for `scalar.db.contact_points` and `scalar.db.password` as described. + +```properties +# The Cosmos DB for NoSQL storage implementation is used for Consensus Commit. +scalar.db.storage=cosmos + +# The Cosmos DB for NoSQL URI. +scalar.db.contact_points= + +# The Cosmos DB for NoSQL key to access the database. +scalar.db.password= +``` + +{% capture notice--info %} +**Note** + +You can use a primary key or a secondary key as the value for `scalar.db.password`. +{% endcapture %} +
{{ notice--info | markdownify }}
+
+
+ +To use Amazon DynamoDB, you must have an AWS account. If you don't have an AWS account, visit [Getting started: Are you a first-time AWS user?](https://docs.aws.amazon.com/accounts/latest/reference/welcome-first-time-user.html). + +### Configure ScalarDB +{:.no_toc} + +The following instructions assume that you have properly installed and configured the JDK in your local environment. + +The **scalardb.properties** file in the `docs/getting-started-kotlin` directory holds database configurations for ScalarDB. Be sure to change the values for `scalar.db.contact_points`, `scalar.db.username`, and `scalar.db.password` as described. + +```properties +# The DynamoDB storage implementation is used for Consensus Commit. +scalar.db.storage=dynamo + +# The AWS region. +scalar.db.contact_points= + +# The AWS access key ID and secret access key to access the database. +scalar.db.username= +scalar.db.password= +``` +
+
+ +Confirm that you have a JDBC database installed. For a list of supported JDBC databases, see [Supported Databases](scalardb-supported-databases.md). + +### Configure ScalarDB +{:.no_toc} + +The following instructions assume that you have properly installed and configured the JDK and JDBC database in your local environment, and the JDBC database is running on your localhost. + +The **scalardb.properties** file in the `docs/getting-started-kotlin` directory holds database configurations for ScalarDB. The following is a basic configuration for JDBC databases. + +{% capture notice--info %} +**Note** + +Be sure to uncomment the `scalar.db.contact_points` variable and change the value of the JDBC database you are using, and change the values for `scalar.db.username` and `scalar.db.password` as described. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +```properties +# The JDBC database storage implementation is used for Consensus Commit. +scalar.db.storage=jdbc + +# The JDBC database URL for the type of database you are using. +# scalar.db.contact_points=jdbc:mysql://localhost:3306/ +# scalar.db.contact_points=jdbc:oracle:thin:@//localhost:1521/ +# scalar.db.contact_points=jdbc:postgresql://localhost:5432/ +# scalar.db.contact_points=jdbc:sqlserver://localhost:1433; +# scalar.db.contact_points=jdbc:sqlite://localhost:3306.sqlite3?busy_timeout=10000 + +# The username and password for connecting to the database. +scalar.db.username= +scalar.db.password= +``` +
+
+
+## Create and load the database schema
+
+You need to define the database schema (the method in which the data will be organized) in the application. For details about the supported data types, see [Data type mapping between ScalarDB and other databases](schema-loader.md#data-type-mapping-between-scalardb-and-the-other-databases).
+
+For this tutorial, create a file named **emoney.json** in the `scalardb/docs/getting-started-kotlin` directory. Then, add the following JSON code to define the schema.
+
+```json
+{
+  "emoney.account": {
+    "transaction": true,
+    "partition-key": [
+      "id"
+    ],
+    "clustering-key": [],
+    "columns": {
+      "id": "TEXT",
+      "balance": "INT"
+    }
+  }
+}
+```
+
+To apply the schema, go to the [`scalardb` Releases](https://github.com/scalar-labs/scalardb/releases) page and download the ScalarDB Schema Loader that matches the version of ScalarDB that you are using to the `getting-started-kotlin` folder.
+
+Then, run the following command, replacing `<VERSION>` with the version of the ScalarDB Schema Loader that you downloaded:
+
+```shell
+$ java -jar scalardb-schema-loader-<VERSION>.jar --config scalardb.properties --schema-file emoney.json --coordinator
+```
+
+{% capture notice--info %}
+**Note**
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.md).
+{% endcapture %}
+
{{ notice--info | markdownify }}
+ +## Execute transactions and retrieve data in the basic electronic money application + +After loading the schema, you can execute transactions and retrieve data in the basic electronic money application that is included in the repository that you cloned. + +The application supports the following types of transactions: + +- Create an account. +- Add funds to an account. +- Send funds between two accounts. +- Get an account balance. + +{% capture notice--info %} +**Note** + +When you first execute a Gradle command, Gradle will automatically install the necessary libraries. +{% endcapture %} + +
{{ notice--info | markdownify }}
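+
+Before running the commands below, it may help to see how an action such as `charge` maps onto ScalarDB's transaction API. The following is a condensed Kotlin sketch modeled on the Java sample included elsewhere in this repository; the namespace, table, and column names come from the `emoney.json` schema above, and the standalone `charge` function is illustrative rather than a copy of `ElectronicMoney.kt`:
+
+```kotlin
+import com.scalar.db.api.Get
+import com.scalar.db.api.Put
+import com.scalar.db.io.Key
+import com.scalar.db.service.TransactionFactory
+
+fun charge(propertiesPath: String, id: String, amount: Int) {
+    // Create a transaction manager from the ScalarDB properties file.
+    val manager = TransactionFactory.create(propertiesPath).transactionManager
+    val tx = manager.start()
+    try {
+        // Read the current balance, if the account already exists.
+        val get = Get.newBuilder()
+            .namespace("emoney")
+            .table("account")
+            .partitionKey(Key.ofText("id", id))
+            .build()
+        val current = tx.get(get).map { it.getInt("balance") }.orElse(0)
+
+        // Write the new balance and commit the transaction.
+        val put = Put.newBuilder()
+            .namespace("emoney")
+            .table("account")
+            .partitionKey(Key.ofText("id", id))
+            .intValue("balance", current + amount)
+            .build()
+        tx.put(put)
+        tx.commit()
+    } catch (e: Exception) {
+        tx.abort()
+        throw e
+    } finally {
+        manager.close()
+    }
+}
+```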
+ +### Create an account with a balance + +You need an account with a balance so that you can send funds between accounts. + +To create an account for **customer1** that has a balance of **500**, run the following command: + +```shell +$ ./gradlew run --args="-action charge -amount 500 -to customer1" +``` + +### Create an account without a balance + +After setting up an account that has a balance, you need another account for sending funds to. + +To create an account for **merchant1** that has a balance of **0**, run the following command: + +```shell +$ ./gradlew run --args="-action charge -amount 0 -to merchant1" +``` + +### Add funds to an account + +You can add funds to an account in the same way that you created and added funds to an account in [Create an account with a balance](#create-an-account-with-a-balance). + +To add **500** to the account for **customer1**, run the following command: + +```shell +$ ./gradlew run --args="-action charge -amount 500 -to customer1" +``` + +The account for **customer1** will now have a balance of **1000**. + +### Send electronic money between two accounts + +Now that you have created two accounts, with at least one of those accounts having a balance, you can send funds from one account to the other account. + +To have **customer1** pay **100** to **merchant1**, run the following command: + +```shell +$ ./gradlew run --args="-action pay -amount 100 -from customer1 -to merchant1" +``` + +### Get an account balance + +After sending funds from one account to the other, you can check the balance of each account. + +To get the balance of **customer1**, run the following command: + +```shell +$ ./gradlew run --args="-action getBalance -id customer1" +``` + +You should see the following output: + +```shell +... +The balance for customer1 is 900 +... +``` + +To get the balance of **merchant1**, run the following command: + +```shell +$ ./gradlew run --args="-action getBalance -id merchant1" +``` + +You should see the following output: + +```shell +... +The balance for merchant1 is 100 +... +``` + +## Reference + +To see the source code for the electronic money application used in this tutorial, see [`ElectronicMoney.kt`](./getting-started-kotlin/src/main/kotlin/sample/ElectronicMoney.kt). diff --git a/docs/3.12/getting-started-with-scalardb.md b/docs/3.12/getting-started-with-scalardb.md new file mode 100644 index 00000000..bd6f7b3d --- /dev/null +++ b/docs/3.12/getting-started-with-scalardb.md @@ -0,0 +1,316 @@ +# Getting Started with ScalarDB + +This getting started tutorial explains how to configure your preferred database in ScalarDB and set up a basic electronic money application. + +{% capture notice--warning %} +**Warning** + +The electronic money application is simplified for this tutorial and isn't suitable for a production environment. +{% endcapture %} + +
{{ notice--warning | markdownify }}
+ +## Install a JDK + +Because ScalarDB is written in Java, you must have one of the following Java Development Kits (JDKs) installed in your environment: + +- [Oracle JDK](https://www.oracle.com/java/technologies/downloads/) LTS version (8, 11, or 17) +- [OpenJDK](https://openjdk.org/install/) LTS version (8, 11, or 17) + +{% capture notice--info %} +**Note** + +We recommend using the LTS versions mentioned above, but other non-LTS versions may work. + +In addition, other JDKs should work with ScalarDB, but we haven't tested them. +{% endcapture %} + +
{{ notice--info | markdownify }}
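+
+To confirm which JDK is active on your machine before continuing, you can check the version from the command line (the exact output depends on your installation):
+
+```shell
+$ java -version
+```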
+ +## Clone the `scalardb` repository + +Open a terminal window, and go to your working directory. Then, clone the [scalardb](https://github.com/scalar-labs/scalardb) repository by running the following command: + +```shell +$ git clone https://github.com/scalar-labs/scalardb +``` + +Then, go to the `scalardb/docs/getting-started` directory in the cloned repository by running the following command: + +```shell +$ cd scalardb/docs/getting-started +``` + +## Set up your database for ScalarDB + +Select your database, and follow the instructions to configure it for ScalarDB. + +For a list of databases that ScalarDB supports, see [Supported Databases](scalardb-supported-databases.md). + +
+
+ + + + +
+ +
+ +Confirm that you have Cassandra installed. If Cassandra isn't installed, visit [Downloading Cassandra](https://cassandra.apache.org/_/download.html). + +### Configure Cassandra +{:.no_toc} + +Open **cassandra.yaml** in your preferred IDE. Then, change `commitlog_sync` from `periodic` to `batch` so that you don't lose data if a quorum of replica nodes goes down. + +### Configure ScalarDB +{:.no_toc} + +The following instructions assume that you have properly installed and configured the JDK and Cassandra in your local environment, and Cassandra is running on your localhost. + +The **scalardb.properties** file in the `docs/getting-started` directory holds database configurations for ScalarDB. The following is a basic configuration for Cassandra. Be sure to change the values for `scalar.db.username` and `scalar.db.password` as described. + +```properties +# The Cassandra storage implementation is used for Consensus Commit. +scalar.db.storage=cassandra + +# Comma-separated contact points. +scalar.db.contact_points=localhost + +# The port number for all the contact points. +scalar.db.contact_port=9042 + +# The username and password to access the database. +scalar.db.username= +scalar.db.password= +``` +
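+
+If you want to confirm that Cassandra is actually reachable on `localhost:9042` before continuing, the tools bundled with Cassandra can be used for a quick check, for example:
+
+```shell
+$ nodetool status
+$ cqlsh localhost 9042 -e "DESCRIBE KEYSPACES"
+```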
+
+ +To use Azure Cosmos DB for NoSQL, you must have an Azure account. If you don't have an Azure account, visit [Create an Azure Cosmos DB account](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/quickstart-portal#create-account). + +### Configure Cosmos DB for NoSQL +{:.no_toc} + +Set the **default consistency level** to **Strong** according to the official document at [Configure the default consistency level](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/how-to-manage-consistency#configure-the-default-consistency-level). + +### Configure ScalarDB +{:.no_toc} + +The following instructions assume that you have properly installed and configured the JDK in your local environment and properly configured your Cosmos DB for NoSQL account in Azure. + +The **scalardb.properties** file in the `docs/getting-started` directory holds database configurations for ScalarDB. Be sure to change the values for `scalar.db.contact_points` and `scalar.db.password` as described. + +```properties +# The Cosmos DB for NoSQL storage implementation is used for Consensus Commit. +scalar.db.storage=cosmos + +# The Cosmos DB for NoSQL URI. +scalar.db.contact_points= + +# The Cosmos DB for NoSQL key to access the database. +scalar.db.password= +``` + +{% capture notice--info %} +**Note** + +You can use a primary key or a secondary key as the value for `scalar.db.password`. +{% endcapture %} +
{{ notice--info | markdownify }}
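+
+If you prefer the Azure CLI to the portal, the same default consistency level can be set with a command along the following lines. This is only an alternative to the portal steps above, and the account and resource group names are placeholders for your own:
+
+```shell
+$ az cosmosdb update \
+    --name my-cosmos-account \
+    --resource-group my-resource-group \
+    --default-consistency-level Strong
+```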
+
+
+ +To use Amazon DynamoDB, you must have an AWS account. If you don't have an AWS account, visit [Getting started: Are you a first-time AWS user?](https://docs.aws.amazon.com/accounts/latest/reference/welcome-first-time-user.html). + +### Configure ScalarDB +{:.no_toc} + +The following instructions assume that you have properly installed and configured the JDK in your local environment. + +The **scalardb.properties** file in the `docs/getting-started` directory holds database configurations for ScalarDB. Be sure to change the values for `scalar.db.contact_points`, `scalar.db.username`, and `scalar.db.password` as described. + +```properties +# The DynamoDB storage implementation is used for Consensus Commit. +scalar.db.storage=dynamo + +# The AWS region. +scalar.db.contact_points= + +# The AWS access key ID and secret access key to access the database. +scalar.db.username= +scalar.db.password= +``` +
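+
+To verify that the region and access key pair you plan to use can actually reach DynamoDB, one option is a quick check with the AWS CLI, assuming it is installed and configured with those credentials. Replace the region with the one you set in `scalar.db.contact_points`:
+
+```shell
+$ aws dynamodb list-tables --region ap-northeast-1
+```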
+
+ +Confirm that you have a JDBC database installed. For a list of supported JDBC databases, see [Supported Databases](scalardb-supported-databases.md). + +### Configure ScalarDB +{:.no_toc} + +The following instructions assume that you have properly installed and configured the JDK and JDBC database in your local environment, and the JDBC database is running on your localhost. + +The **scalardb.properties** file in the `docs/getting-started` directory holds database configurations for ScalarDB. The following is a basic configuration for JDBC databases. + +{% capture notice--info %} +**Note** + +Be sure to uncomment the `scalar.db.contact_points` variable and change the value of the JDBC database you are using, and change the values for `scalar.db.username` and `scalar.db.password` as described. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +```properties +# The JDBC database storage implementation is used for Consensus Commit. +scalar.db.storage=jdbc + +# The JDBC database URL for the type of database you are using. +# scalar.db.contact_points=jdbc:mysql://localhost:3306/ +# scalar.db.contact_points=jdbc:oracle:thin:@//localhost:1521/ +# scalar.db.contact_points=jdbc:postgresql://localhost:5432/ +# scalar.db.contact_points=jdbc:sqlserver://localhost:1433; +# scalar.db.contact_points=jdbc:sqlite://localhost:3306.sqlite3?busy_timeout=10000 + +# The username and password for connecting to the database. +scalar.db.username= +scalar.db.password= +``` +
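+
+For example, if you are using MySQL running locally, the uncommented and filled-in configuration might look like the following. The user name and password here are placeholders for the credentials you created for ScalarDB:
+
+```properties
+scalar.db.storage=jdbc
+scalar.db.contact_points=jdbc:mysql://localhost:3306/
+scalar.db.username=scalardb
+scalar.db.password=scalardb-password
+```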
+
+
+## Create and load the database schema
+
+You need to define the database schema (the method in which the data will be organized) in the application. For details about the supported data types, see [Data type mapping between ScalarDB and other databases](schema-loader.md#data-type-mapping-between-scalardb-and-the-other-databases).
+
+For this tutorial, create a file named **emoney.json** in the `scalardb/docs/getting-started` directory. Then, add the following JSON code to define the schema.
+
+```json
+{
+  "emoney.account": {
+    "transaction": true,
+    "partition-key": [
+      "id"
+    ],
+    "clustering-key": [],
+    "columns": {
+      "id": "TEXT",
+      "balance": "INT"
+    }
+  }
+}
+```
+
+To apply the schema, go to the [`scalardb` Releases](https://github.com/scalar-labs/scalardb/releases) page and download the ScalarDB Schema Loader that matches the version of ScalarDB that you are using to the `getting-started` folder.
+
+Then, run the following command, replacing `<VERSION>` with the version of the ScalarDB Schema Loader that you downloaded:
+
+```shell
+$ java -jar scalardb-schema-loader-<VERSION>.jar --config scalardb.properties --schema-file emoney.json --coordinator
+```
+
+{% capture notice--info %}
+**Note**
+
+The `--coordinator` option is specified because a table with `transaction` set to `true` exists in the schema. For details about configuring and loading a schema, see [ScalarDB Schema Loader](schema-loader.md).
+{% endcapture %}
+
{{ notice--info | markdownify }}
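+
+For example, if you downloaded version 3.12.0 of the ScalarDB Schema Loader (shown here only as an illustration; use the version that matches your setup), the command would be:
+
+```shell
+$ java -jar scalardb-schema-loader-3.12.0.jar --config scalardb.properties --schema-file emoney.json --coordinator
+```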
+ +## Execute transactions and retrieve data in the basic electronic money application + +After loading the schema, you can execute transactions and retrieve data in the basic electronic money application that is included in the repository that you cloned. + +The application supports the following types of transactions: + +- Create an account. +- Add funds to an account. +- Send funds between two accounts. +- Get an account balance. + +{% capture notice--info %} +**Note** + +When you first execute a Gradle command, Gradle will automatically install the necessary libraries. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +### Create an account with a balance + +You need an account with a balance so that you can send funds between accounts. + +To create an account for **customer1** that has a balance of **500**, run the following command: + +```shell +$ ./gradlew run --args="-action charge -amount 500 -to customer1" +``` + +### Create an account without a balance + +After setting up an account that has a balance, you need another account for sending funds to. + +To create an account for **merchant1** that has a balance of **0**, run the following command: + +```shell +$ ./gradlew run --args="-action charge -amount 0 -to merchant1" +``` + +### Add funds to an account + +You can add funds to an account in the same way that you created and added funds to an account in [Create an account with a balance](#create-an-account-with-a-balance). + +To add **500** to the account for **customer1**, run the following command: + +```shell +$ ./gradlew run --args="-action charge -amount 500 -to customer1" +``` + +The account for **customer1** will now have a balance of **1000**. + +### Send electronic money between two accounts + +Now that you have created two accounts, with at least one of those accounts having a balance, you can send funds from one account to the other account. + +To have **customer1** pay **100** to **merchant1**, run the following command: + +```shell +$ ./gradlew run --args="-action pay -amount 100 -from customer1 -to merchant1" +``` + +### Get an account balance + +After sending funds from one account to the other, you can check the balance of each account. + +To get the balance of **customer1**, run the following command: + +```shell +$ ./gradlew run --args="-action getBalance -id customer1" +``` + +You should see the following output: + +```shell +... +The balance for customer1 is 900 +... +``` + +To get the balance of **merchant1**, run the following command: + +```shell +$ ./gradlew run --args="-action getBalance -id merchant1" +``` + +You should see the following output: + +```shell +... +The balance for merchant1 is 100 +... +``` + +## Reference + +To see the source code for the electronic money application used in this tutorial, see [`ElectronicMoney.java`](./getting-started/src/main/java/sample/ElectronicMoney.java). 
diff --git a/docs/3.12/getting-started/build.gradle b/docs/3.12/getting-started/build.gradle new file mode 100644 index 00000000..7bd82d12 --- /dev/null +++ b/docs/3.12/getting-started/build.gradle @@ -0,0 +1,17 @@ +apply plugin: 'java' +apply plugin: 'idea' +apply plugin: 'application' + +repositories { + mavenCentral() +} + +mainClassName = "sample.ElectronicMoneyMain" + +dependencies { + implementation 'com.scalar-labs:scalardb:3.12.0' + implementation 'org.slf4j:slf4j-simple:1.7.30' +} + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 diff --git a/docs/3.12/getting-started/build/classes/java/main/sample/ElectronicMoney.class b/docs/3.12/getting-started/build/classes/java/main/sample/ElectronicMoney.class new file mode 100644 index 00000000..207c9113 Binary files /dev/null and b/docs/3.12/getting-started/build/classes/java/main/sample/ElectronicMoney.class differ diff --git a/docs/3.12/getting-started/build/classes/java/main/sample/ElectronicMoneyMain.class b/docs/3.12/getting-started/build/classes/java/main/sample/ElectronicMoneyMain.class new file mode 100644 index 00000000..e47cf1c9 Binary files /dev/null and b/docs/3.12/getting-started/build/classes/java/main/sample/ElectronicMoneyMain.class differ diff --git a/docs/3.12/getting-started/build/tmp/compileJava/previous-compilation-data.bin b/docs/3.12/getting-started/build/tmp/compileJava/previous-compilation-data.bin new file mode 100644 index 00000000..1faadeb3 Binary files /dev/null and b/docs/3.12/getting-started/build/tmp/compileJava/previous-compilation-data.bin differ diff --git a/docs/3.12/getting-started/gradle/wrapper/gradle-wrapper.jar b/docs/3.12/getting-started/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 00000000..7454180f Binary files /dev/null and b/docs/3.12/getting-started/gradle/wrapper/gradle-wrapper.jar differ diff --git a/docs/3.12/getting-started/gradle/wrapper/gradle-wrapper.properties b/docs/3.12/getting-started/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000..070cb702 --- /dev/null +++ b/docs/3.12/getting-started/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-7.6-bin.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/docs/3.12/getting-started/gradlew b/docs/3.12/getting-started/gradlew new file mode 100755 index 00000000..744e882e --- /dev/null +++ b/docs/3.12/getting-started/gradlew @@ -0,0 +1,185 @@ +#!/usr/bin/env sh + +# +# Copyright 2015 the original author or authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. 
+while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MSYS* | MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin or MSYS, switch paths to Windows format before running java +if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=`expr $i + 1` + done + case $i in + 0) set -- ;; + 1) set -- "$args0" ;; + 2) set -- "$args0" "$args1" ;; + 3) set -- "$args0" "$args1" "$args2" ;; + 4) set -- "$args0" "$args1" "$args2" "$args3" ;; + 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=`save "$@"` + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +exec "$JAVACMD" "$@" diff --git a/docs/3.12/getting-started/gradlew.bat b/docs/3.12/getting-started/gradlew.bat new file mode 100644 index 00000000..107acd32 --- /dev/null +++ b/docs/3.12/getting-started/gradlew.bat @@ -0,0 +1,89 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/docs/3.12/getting-started/scalardb-schema-loader-3.10.1.jar b/docs/3.12/getting-started/scalardb-schema-loader-3.10.1.jar new file mode 100644 index 00000000..581fc7ba Binary files /dev/null and b/docs/3.12/getting-started/scalardb-schema-loader-3.10.1.jar differ diff --git a/docs/3.12/getting-started/scalardb.properties b/docs/3.12/getting-started/scalardb.properties new file mode 100755 index 00000000..b1dbcbde --- /dev/null +++ b/docs/3.12/getting-started/scalardb.properties @@ -0,0 +1,12 @@ +# Comma-separated contact points +scalar.db.contact_points=localhost + +# Port number for all the contact points. Default port number for each database is used if empty. +#scalar.db.contact_port= + +# Credential information to access the database +scalar.db.username=cassandra +scalar.db.password=cassandra + +# Storage implementation. Either cassandra or cosmos or dynamo or jdbc can be set. Default storage is cassandra. 
+#scalar.db.storage=cassandra diff --git a/docs/3.12/getting-started/settings.gradle b/docs/3.12/getting-started/settings.gradle new file mode 100644 index 00000000..744e2a3e --- /dev/null +++ b/docs/3.12/getting-started/settings.gradle @@ -0,0 +1 @@ +rootProject.name = 'getting-started' diff --git a/docs/3.12/getting-started/src/main/java/sample/ElectronicMoney.java b/docs/3.12/getting-started/src/main/java/sample/ElectronicMoney.java new file mode 100644 index 00000000..2af60ca2 --- /dev/null +++ b/docs/3.12/getting-started/src/main/java/sample/ElectronicMoney.java @@ -0,0 +1,153 @@ +package sample; + +import com.scalar.db.api.DistributedTransaction; +import com.scalar.db.api.DistributedTransactionManager; +import com.scalar.db.api.Get; +import com.scalar.db.api.Put; +import com.scalar.db.api.Result; +import com.scalar.db.exception.transaction.TransactionException; +import com.scalar.db.io.Key; +import com.scalar.db.service.TransactionFactory; +import java.io.IOException; +import java.util.Optional; + +public class ElectronicMoney { + + private static final String NAMESPACE = "emoney"; + private static final String TABLENAME = "account"; + private static final String ID = "id"; + private static final String BALANCE = "balance"; + + private final DistributedTransactionManager manager; + + public ElectronicMoney(String scalarDBProperties) throws IOException { + TransactionFactory factory = TransactionFactory.create(scalarDBProperties); + manager = factory.getTransactionManager(); + } + + public void charge(String id, int amount) throws TransactionException { + // Start a transaction + DistributedTransaction tx = manager.start(); + + try { + // Retrieve the current balance for id + Get get = + Get.newBuilder() + .namespace(NAMESPACE) + .table(TABLENAME) + .partitionKey(Key.ofText(ID, id)) + .build(); + Optional result = tx.get(get); + + // Calculate the balance + int balance = amount; + if (result.isPresent()) { + int current = result.get().getInt(BALANCE); + balance += current; + } + + // Update the balance + Put put = + Put.newBuilder() + .namespace(NAMESPACE) + .table(TABLENAME) + .partitionKey(Key.ofText(ID, id)) + .intValue(BALANCE, balance) + .build(); + tx.put(put); + + // Commit the transaction (records are automatically recovered in case of failure) + tx.commit(); + } catch (Exception e) { + tx.abort(); + throw e; + } + } + + public void pay(String fromId, String toId, int amount) throws TransactionException { + // Start a transaction + DistributedTransaction tx = manager.start(); + + try { + // Retrieve the current balances for ids + Get fromGet = + Get.newBuilder() + .namespace(NAMESPACE) + .table(TABLENAME) + .partitionKey(Key.ofText(ID, fromId)) + .build(); + Get toGet = + Get.newBuilder() + .namespace(NAMESPACE) + .table(TABLENAME) + .partitionKey(Key.ofText(ID, toId)) + .build(); + Optional fromResult = tx.get(fromGet); + Optional toResult = tx.get(toGet); + + // Calculate the balances (it assumes that both accounts exist) + int newFromBalance = fromResult.get().getInt(BALANCE) - amount; + int newToBalance = toResult.get().getInt(BALANCE) + amount; + if (newFromBalance < 0) { + throw new RuntimeException(fromId + " doesn't have enough balance."); + } + + // Update the balances + Put fromPut = + Put.newBuilder() + .namespace(NAMESPACE) + .table(TABLENAME) + .partitionKey(Key.ofText(ID, fromId)) + .intValue(BALANCE, newFromBalance) + .build(); + Put toPut = + Put.newBuilder() + .namespace(NAMESPACE) + .table(TABLENAME) + .partitionKey(Key.ofText(ID, toId)) + .intValue(BALANCE, 
newToBalance) + .build(); + tx.put(fromPut); + tx.put(toPut); + + // Commit the transaction (records are automatically recovered in case of failure) + tx.commit(); + } catch (Exception e) { + tx.abort(); + throw e; + } + } + + public int getBalance(String id) throws TransactionException { + // Start a transaction + DistributedTransaction tx = manager.start(); + + try { + // Retrieve the current balances for id + Get get = + Get.newBuilder() + .namespace(NAMESPACE) + .table(TABLENAME) + .partitionKey(Key.ofText(ID, id)) + .build(); + Optional result = tx.get(get); + + int balance = -1; + if (result.isPresent()) { + balance = result.get().getInt(BALANCE); + } + + // Commit the transaction + tx.commit(); + + return balance; + } catch (Exception e) { + tx.abort(); + throw e; + } + } + + public void close() { + manager.close(); + } +} diff --git a/docs/3.12/getting-started/src/main/java/sample/ElectronicMoneyMain.java b/docs/3.12/getting-started/src/main/java/sample/ElectronicMoneyMain.java new file mode 100644 index 00000000..53348744 --- /dev/null +++ b/docs/3.12/getting-started/src/main/java/sample/ElectronicMoneyMain.java @@ -0,0 +1,75 @@ +package sample; + +import java.io.File; + +public class ElectronicMoneyMain { + + public static void main(String[] args) throws Exception { + String action = null; + int amount = 0; + String to = null; + String from = null; + String id = null; + String scalarDBProperties = null; + + for (int i = 0; i < args.length; ++i) { + if ("-action".equals(args[i])) { + action = args[++i]; + } else if ("-amount".equals(args[i])) { + amount = Integer.parseInt(args[++i]); + } else if ("-to".equals(args[i])) { + to = args[++i]; + } else if ("-from".equals(args[i])) { + from = args[++i]; + } else if ("-id".equals(args[i])) { + id = args[++i]; + } else if ("-config".equals(args[i])) { + scalarDBProperties = args[++i]; + } else if ("-help".equals(args[i])) { + printUsageAndExit(); + return; + } + } + + if (action == null) { + printUsageAndExit(); + return; + } + + ElectronicMoney eMoney; + if (scalarDBProperties != null) { + eMoney = new ElectronicMoney(scalarDBProperties); + } else { + scalarDBProperties = System.getProperty("user.dir") + File.separator + "scalardb.properties"; + eMoney = new ElectronicMoney(scalarDBProperties); + } + + if (action.equalsIgnoreCase("charge")) { + if (to == null || amount < 0) { + printUsageAndExit(); + return; + } + eMoney.charge(to, amount); + } else if (action.equalsIgnoreCase("pay")) { + if (to == null || amount < 0 || from == null) { + printUsageAndExit(); + return; + } + eMoney.pay(from, to, amount); + } else if (action.equalsIgnoreCase("getBalance")) { + if (id == null) { + printUsageAndExit(); + return; + } + int balance = eMoney.getBalance(id); + System.out.println("The balance for " + id + " is " + balance); + } + eMoney.close(); + } + + private static void printUsageAndExit() { + System.err.println( + "ElectronicMoneyMain -action charge/pay/getBalance [-amount number (needed for charge and pay)] [-to id (needed for charge and pay)] [-from id (needed for pay)] [-id id (needed for getBalance)]"); + System.exit(1); + } +} diff --git a/docs/3.12/guides.md b/docs/3.12/guides.md new file mode 100644 index 00000000..be256305 --- /dev/null +++ b/docs/3.12/guides.md @@ -0,0 +1,15 @@ +--- +toc: false +--- + +# Developer Guides for ScalarDB + +The following is a list of developer guides for ScalarDB: + +- [ScalarDB Java API Guide](api-guide.md) +- [ScalarDB JDBC Guide](scalardb-sql/jdbc-guide.md) +- [ScalarDB SQL API 
Guide](scalardb-sql/sql-api-guide.md) +- [Spring Data Integration with ScalarDB Guide](scalardb-sql/spring-data-guide.md) +- [Developer Guide for ScalarDB Cluster with the Java API](scalardb-cluster/developer-guide-for-scalardb-cluster-with-java-api.md) +- [ScalarDB Cluster gRPC API Guide](scalardb-cluster/scalardb-cluster-grpc-api-guide.md) +- [ScalarDB Cluster SQL gRPC API Guide](scalardb-cluster/scalardb-cluster-sql-grpc-api-guide.md) diff --git a/docs/3.12/helm-charts/README.md b/docs/3.12/helm-charts/README.md new file mode 100644 index 00000000..2bc4da97 --- /dev/null +++ b/docs/3.12/helm-charts/README.md @@ -0,0 +1,9 @@ +# Index + +## For users +* [Getting Started with Scalar Helm Charts](./getting-started-scalar-helm-charts.md) +* [Configure a custom values file for Scalar Helm Chart](./configure-custom-values-file.md) +* [Deploy Scalar products using Scalar Helm Charts](./how-to-deploy-scalar-products.md) + +## For developers +* [Release Flow](./ReleaseFlow.md) diff --git a/docs/3.12/helm-charts/conf/scalar-loki-stack-custom-values.yaml b/docs/3.12/helm-charts/conf/scalar-loki-stack-custom-values.yaml new file mode 100644 index 00000000..f078536a --- /dev/null +++ b/docs/3.12/helm-charts/conf/scalar-loki-stack-custom-values.yaml @@ -0,0 +1,55 @@ +promtail: + config: + snippets: + # -- `scapeConfigs` is exactly the part of https://grafana.com/docs/loki/latest/clients/promtail/configuration/#scrape_configs + # -- The value will be created as a Kubernetes ConfigMap and then mounted to the Promtail Pod. + # -- Not really need to change this value. It's set to scrape all logs of ScalarDL/DB Pods by using regular expression. + scrapeConfigs: | + # -- the `scalardl` job scrapes all the logs from Scalar Ledger Pods, Scalar Auditor Pods, and the corresponding Envoy Pods + - job_name: scalardl + pipeline_stages: + - docker: {} + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_node_name + target_label: __host__ + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: pod + - action: keep + regex: (.*)scalardl-(.+) + source_labels: + - pod + - replacement: /var/log/pods/*$1/*.log + separator: / + source_labels: + - __meta_kubernetes_pod_uid + - __meta_kubernetes_pod_container_name + target_label: __path__ + # -- the `scalardb` job scrapes all the logs of ScalarDB Server Pods and the corresponding Envoy Pods + - job_name: scalardb + pipeline_stages: + - docker: {} + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_node_name + target_label: __host__ + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: pod + - action: keep + regex: (.*)scalardb-(.+) + source_labels: + - pod + - replacement: /var/log/pods/*$1/*.log + separator: / + source_labels: + - __meta_kubernetes_pod_uid + - __meta_kubernetes_pod_container_name + target_label: __path__ diff --git a/docs/3.12/helm-charts/conf/scalar-manager-custom-values.yaml b/docs/3.12/helm-charts/conf/scalar-manager-custom-values.yaml new file mode 100644 index 00000000..7d19a4d3 --- /dev/null +++ b/docs/3.12/helm-charts/conf/scalar-manager-custom-values.yaml @@ -0,0 +1,12 @@ +scalarManager: + # targets -- The targets that Scalar Manager should monitor + targets: # example + - name: Ledgers + adminSrv: _scalardl-admin._tcp.scalardl-headless.default.svc.cluster.local + databaseType: cassandra + + # grafanaUrl -- The URL where Grafana can be accessable in public + grafanaUrl: 
"http://localhost:3000" + + # refreshInterval -- The interval that Scalar Manager refresh the status of the monitoring targets + refreshInterval: 30 diff --git a/docs/3.12/helm-charts/conf/scalar-prometheus-custom-values.yaml b/docs/3.12/helm-charts/conf/scalar-prometheus-custom-values.yaml new file mode 100644 index 00000000..816ead1b --- /dev/null +++ b/docs/3.12/helm-charts/conf/scalar-prometheus-custom-values.yaml @@ -0,0 +1,167 @@ +defaultRules: + # -- Default PrometheusRules are not enabled + create: false + +alertmanager: + # -- alertmanager is enabled + enabled: true + + # -- Only check own namespace + alertmanagerConfigNamespaceSelector: null + +grafana: + # -- grafana is enabled + enabled: true + + # -- Default Grafana dashboards are not enabled + defaultDashboardsEnabled: false + + sidecar: + datasources: + enabled: true + defaultDatasourceEnabled: false + label: grafana_datasource + labelValue: "1" + dashboards: + enabled: true + label: grafana_dashboard + labelValue: "1" + # -- Resource limits & requests + resources: {} + # requests: + # memory: 400Mi + + # -- Grafana's primary configuration + grafana.ini: + security: + # -- allow Grafana to be embedded (not set the X-Frame-Options header) + # -- If you use Scalar Manager, you need to set allow_embedding to true. + # -- https://grafana.com/docs/grafana/latest/administration/configuration/#allow_embedding + allow_embedding: false + + # -- Additional data source configurations + additionalDataSources: + - name: Prometheus + type: prometheus + uid: prometheus + url: http://scalar-monitoring-kube-pro-prometheus:9090/ + access: proxy + editable: false + isDefault: false + jsonData: + timeInterval: 30s + # - name: Loki + # type: loki + # uid: loki + # url: http://scalar-logging-loki:3100/ + # access: proxy + # editable: false + # isDefault: false + +kubeApiServer: + # -- Scraping kube-apiserver is disabled + enabled: false + +kubeControllerManager: + # -- Scraping kube-controller-manager is disabled + enabled: false + +coreDns: + # -- Scraping CoreDNS is disabled + enabled: false + +kubeEtcd: + # -- Scraping etcd is disabled + enabled: false + +kubeScheduler: + # -- Scraping kube-scheduler is disabled + enabled: false + +kubeProxy: + # -- Scraping kube-proxy is disabled + enabled: false + +kubelet: + # -- Scraping kubelet is disabled + enabled: false + +kubeStateMetrics: + # -- kube-state-metrics is disabled + enabled: false + +nodeExporter: + # -- node-exporter is disabled + enabled: false + +prometheusOperator: + # -- Prometheus Operator is enabled + enabled: true + + admissionWebhooks: + patch: + # -- Resource limits & requests + resources: {} + # requests: + # memory: 400Mi + + namespaces: + # -- Only check own namespace + releaseNamespace: true + + kubeletService: + # -- kubelet service for scraping kubelets is disabled + enabled: false + + ## -- Resource limits & requests + resources: {} + # requests: + # memory: 400Mi + +prometheus: + # -- Prometheus is enabled + enabled: true + + prometheusSpec: + # -- All PrometheusRules are enabled + ruleSelectorNilUsesHelmValues: false + + # -- Only check own namespace + ruleNamespaceSelector: {} + + # -- All ServiceMonitors are enabled + serviceMonitorSelectorNilUsesHelmValues: false + + # -- Only check own namespace + serviceMonitorNamespaceSelector: {} + + # -- All PodMonitors are enabled + podMonitorSelectorNilUsesHelmValues: false + + # -- Only check own namespace + podMonitorNamespaceSelector: {} + + # -- All Probes are enabled + probeSelectorNilUsesHelmValues: false + + # -- Only 
check own namespace + probeNamespaceSelector: {} + + ## -- Resource limits & requests + resources: {} + # requests: + # memory: 400Mi + + ## -- Prometheus StorageSpec for persistent data + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md + storageSpec: {} + ## Using PersistentVolumeClaim + ## + # volumeClaimTemplate: + # spec: + # storageClassName: gluster + # accessModes: ["ReadWriteOnce"] + # resources: + # requests: + # storage: 50Gi + # selector: {} diff --git a/docs/3.12/helm-charts/configure-custom-values-envoy.md b/docs/3.12/helm-charts/configure-custom-values-envoy.md new file mode 100644 index 00000000..07660951 --- /dev/null +++ b/docs/3.12/helm-charts/configure-custom-values-envoy.md @@ -0,0 +1,174 @@ +# Configure a custom values file for Scalar Envoy + +This document explains how to create your custom values file for the Scalar Envoy chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/envoy/README.md) of the Scalar Envoy chart. + +## Configure custom values for Scalar Envoy chart + +The Scalar Envoy chart is used via other charts (scalardb, scalardb-cluster, scalardl, and scalardl-audit), so you don't need to create a custom values file for the Scalar Envoy chart. If you want to configure Scalar Envoy, you need to add the `envoy.*` configuration to the other charts. + +For example, if you want to configure the Scalar Envoy for ScalarDB Server, you can configure some Scalar Envoy configurations in the custom values file of ScalarDB as follows. + +* Example (scalardb-custom-values.yaml) + ```yaml + envoy: + configurationsForScalarEnvoy: + ... + + scalardb: + configurationsForScalarDB: + ... + ``` + +## Required configurations + +### Service configurations + +You must set `envoy.service.type` to specify the Service resource type of Kubernetes. + +If you accept client requests from inside of the Kubernetes cluster only (for example, if you deploy your client applications on the same Kubernetes cluster as Scalar products), you can set `envoy.service.type` to `ClusterIP`. This configuration doesn't create any load balancers provided by cloud service providers. + +```yaml +envoy: + service: + type: ClusterIP +``` + +If you want to use a load balancer provided by a cloud service provider to accept client requests from outside of the Kubernetes cluster, you need to set `envoy.service.type` to `LoadBalancer`. + +```yaml +envoy: + service: + type: LoadBalancer +``` + +If you want to configure the load balancer via annotations, you can also set annotations to `envoy.service.annotations`. + +```yaml +envoy: + service: + type: LoadBalancer + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" +``` + +## Optional configurations + +### Resource configurations (Recommended in the production environment) + +If you want to control pod resources using the requests and limits of Kubernetes, you can use `envoy.resources`. + +You can configure them using the same syntax as the requests and limits of Kubernetes. So, please refer to the official document [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details on the requests and limits of Kubernetes. 
+ +```yaml +envoy: + resources: + requests: + cpu: 1000m + memory: 2Gi + limits: + cpu: 2000m + memory: 4Gi +``` + +### Affinity configurations (Recommended in the production environment) + +If you want to control pod deployment using the affinity and anti-affinity of Kubernetes, you can use `envoy.affinity`. + +You can configure them using the same syntax as the affinity of Kubernetes. So, please refer to the official document [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more details on the affinity configuration of Kubernetes. + +```yaml +envoy: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - scalardb-cluster + - key: app.kubernetes.io/app + operator: In + values: + - envoy + topologyKey: kubernetes.io/hostname + weight: 50 +``` + +### Prometheus and Grafana configurations (Recommended in production environments) + +If you want to monitor Scalar Envoy pods using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can deploy a ConfigMap, a ServiceMonitor, and a PrometheusRule resource for kube-prometheus-stack using `envoy.grafanaDashboard.enabled`, `envoy.serviceMonitor.enabled`, and `envoy.prometheusRule.enabled`. + +```yaml +envoy: + grafanaDashboard: + enabled: true + namespace: monitoring + serviceMonitor: + enabled: true + namespace: monitoring + interval: 15s + prometheusRule: + enabled: true + namespace: monitoring +``` + +### SecurityContext configurations (Default value is recommended) + +If you want to set SecurityContext and PodSecurityContext for Scalar Envoy pods, you can use `envoy.securityContext` and `envoy.podSecurityContext`. + +You can configure them using the same syntax as SecurityContext and PodSecurityContext of Kubernetes. So, please refer to the official document [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for more details on the SecurityContext and PodSecurityContext configurations of Kubernetes. + +```yaml +envoy: + podSecurityContext: + seccompProfile: + type: RuntimeDefault + securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + allowPrivilegeEscalation: false +``` + +### Image configurations (Default value is recommended) + +If you want to change the image repository and version, you can use `envoy.image.repository` to specify the container repository information of the Scalar Envoy container image that you want to pull. + +```yaml +envoy: + image: + repository: +``` + +If you're using AWS or Azure, please refer to the following documents for more details: + +* [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md) +* [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md) + +### Replica configurations (Optional based on your environment) + +You can specify the number of replicas (pods) of Scalar Envoy using `envoy.replicaCount`. + +```yaml +envoy: + replicaCount: 3 +``` + +### Taint and toleration configurations (Optional based on your environment) + +If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `envoy.tolerations`. 
+ +You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). + +```yaml +envoy: + tolerations: + - effect: NoSchedule + key: scalar-labs.com/dedicated-node + operator: Equal + value: scalardb +``` diff --git a/docs/3.12/helm-charts/configure-custom-values-file.md b/docs/3.12/helm-charts/configure-custom-values-file.md new file mode 100644 index 00000000..2290933a --- /dev/null +++ b/docs/3.12/helm-charts/configure-custom-values-file.md @@ -0,0 +1,14 @@ +# Configure a custom values file for Scalar Helm Charts + +When you deploy Scalar products using Scalar Helm Charts, you must prepare your custom values file based on your environment. Please refer to the following documents for more details on how to a create custom values file for each product. + +* [ScalarDB Cluster](./configure-custom-values-scalardb-cluster.md) +* [ScalarDB Analytics with PostgreSQL](./configure-custom-values-scalardb-analytics-postgresql.md) +* [ScalarDL Ledger](./configure-custom-values-scalardl-ledger.md) +* [ScalarDL Auditor](./configure-custom-values-scalardl-auditor.md) +* [ScalarDL Schema Loader](./configure-custom-values-scalardl-schema-loader.md) +* [Scalar Admin for Kubernetes](./configure-custom-values-scalar-admin-for-kubernetes.md) +* [Scalar Manager](./configure-custom-values-scalar-manager.md) +* [Envoy](./configure-custom-values-envoy.md) +* [[Deprecated] ScalarDB Server](./configure-custom-values-scalardb.md) +* [[Deprecated] ScalarDB GraphQL](./configure-custom-values-scalardb-graphql.md) diff --git a/docs/3.12/helm-charts/configure-custom-values-scalar-admin-for-kubernetes.md b/docs/3.12/helm-charts/configure-custom-values-scalar-admin-for-kubernetes.md new file mode 100644 index 00000000..39b0ef04 --- /dev/null +++ b/docs/3.12/helm-charts/configure-custom-values-scalar-admin-for-kubernetes.md @@ -0,0 +1,101 @@ +# Configure a custom values file for Scalar Admin for Kubernetes + +This document explains how to create your custom values file for the Scalar Admin for Kubernetes chart. For details on the parameters, see the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalar-admin-for-kubernetes/README.md) of the Scalar Admin for Kubernetes chart. + +## Required configurations + +This section explains the required configurations when setting up a custom values file for Scalar Admin for Kubernetes. + +### Flag configurations + +You must specify several flags to `scalarAdminForKubernetes.commandArgs` as an array to run Scalar Admin for Kubernetes. For more details on the flags, see [README](https://github.com/scalar-labs/scalar-admin-for-kubernetes/blob/main/README.md) of Scalar Admin for Kubernetes. + +```yaml +scalarAdminForKubernetes: + commandArgs: + - -r + - + - -n + - + - -d + - + - -z + - +``` + +## Optional configurations + +This section explains the optional configurations when setting up a custom values file for Scalar Admin for Kubernetes. + +### CronJob configurations (optional based on your environment) + +By default, the Scalar Admin for Kubernetes chart creates a [Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) resource to run the Scalar Admin for Kubernetes CLI tool once. 
If you want to run the Scalar Admin for Kubernetes CLI tool periodically by using [CronJob](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/), you can set `scalarAdminForKubernetes.cronJob.enabled` to `true`. Also, you can set some configurations for the CronJob resource. + +```yaml +scalarAdminForKubernetes: + cronJob: + enabled: true + timeZone: "Etc/UTC" + schedule: "0 0 * * *" +``` + +### Resource configurations (recommended in production environments) + +To control pod resources by using requests and limits in Kubernetes, you can use `scalarAdminForKubernetes.resources`. + +You can configure requests and limits by using the same syntax as requests and limits in Kubernetes. For more details on requests and limits in Kubernetes, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + +```yaml +scalarAdminForKubernetes: + resources: + requests: + cpu: 1000m + memory: 2Gi + limits: + cpu: 2000m + memory: 4Gi +``` + +### SecurityContext configurations (default value is recommended) + +To set SecurityContext and PodSecurityContext for Scalar Admin for Kubernetes pods, you can use `scalarAdminForKubernetes.securityContext` and `scalarAdminForKubernetes.podSecurityContext`. + +You can configure SecurityContext and PodSecurityContext by using the same syntax as SecurityContext and PodSecurityContext in Kubernetes. For more details on the SecurityContext and PodSecurityContext configurations in Kubernetes, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). + +```yaml +scalarAdminForKubernetes: + podSecurityContext: + seccompProfile: + type: RuntimeDefault + securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + allowPrivilegeEscalation: false +``` + +### Image configurations (default value is recommended) + +If you want to change the image repository, you can use `scalarAdminForKubernetes.image.repository` to specify the container repository information of the Scalar Admin for Kubernetes image that you want to pull. + +```yaml +scalarAdminForKubernetes: + image: + repository: +``` + +### Taint and toleration configurations (optional based on your environment) + +If you want to control pod deployment by using taints and tolerations in Kubernetes, you can use `scalarAdminForKubernetes.tolerations`. + +You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). + +```yaml +scalarAdminForKubernetes: + tolerations: + - effect: NoSchedule + key: scalar-labs.com/dedicated-node + operator: Equal + value: scalardb-analytics-postgresql +``` diff --git a/docs/3.12/helm-charts/configure-custom-values-scalar-manager.md b/docs/3.12/helm-charts/configure-custom-values-scalar-manager.md new file mode 100644 index 00000000..2ceda5b8 --- /dev/null +++ b/docs/3.12/helm-charts/configure-custom-values-scalar-manager.md @@ -0,0 +1,66 @@ +# Configure a custom values file for Scalar Manager + +This document explains how to create your custom values file for the Scalar Manager chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalar-manager/README.md) of the Scalar Manager chart. 
+ +## Required configurations + +### Service configurations + +You must set `service.type` to specify the Service resource type of Kubernetes. If you want to use a load balancer provided by cloud providers, you need to set `service.type` to `LoadBalancer`. + +```yaml +service: + type: LoadBalancer +``` + +### Image configurations + +You must set `image.repository`. Be sure to specify the Scalar Manager container image so that you can pull the image from the container repository. + +```yaml +image: + repository: +``` + +### Targets configurations + +You must set `scalarManager.targets`. Please set the DNS Service URL that returns the SRV record of pods. Kubernetes creates this URL for the named port of the headless service of the Scalar product. The format is `_{port name}._{protocol}.{service name}.{namespace}.svc.{cluster domain name}`. + +```yaml +scalarManager: + targets: + - name: Ledger + adminSrv: _scalardl-admin._tcp.scalardl-headless.default.svc.cluster.local + databaseType: cassandra + - name: Auditor + adminSrv: _scalardl-auditor-admin._tcp.scalardl-auditor-headless.default.svc.cluster.local + databaseType: cassandra +``` + +### Grafana configurations + +You must set the `scalarManager.grafanaUrl`. Please specify your Grafana URL. + +```yaml +scalarManager: + grafanaUrl: "http://localhost:3000" +``` + +## Optional configurations + +### Replica configurations (Optional based on your environment) + +You can specify the number of replicas (pods) of Scalar Manager using `replicaCount`. + +```yaml +replicaCount: 3 +``` + +### Refresh interval configurations (Optional based on your environment) + +You can specify the refresh interval at which Scalar Manager checks the status of the products by using `scalarManager.refreshInterval`. + +```yaml +scalarManager: + refreshInterval: 30 +``` diff --git a/docs/3.12/helm-charts/configure-custom-values-scalardb-analytics-postgresql.md b/docs/3.12/helm-charts/configure-custom-values-scalardb-analytics-postgresql.md new file mode 100644 index 00000000..91e4ecb0 --- /dev/null +++ b/docs/3.12/helm-charts/configure-custom-values-scalardb-analytics-postgresql.md @@ -0,0 +1,185 @@ +# Configure a custom values file for ScalarDB Analytics with PostgreSQL + +This document explains how to create your custom values file for the ScalarDB Analytics with PostgreSQL chart. For details on the parameters, see the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardb-analytics-postgresql/README.md) of the ScalarDB Analytics with PostgreSQL chart. + +## Required configurations + +This section explains the required configurations when setting up a custom values file for ScalarDB Analytics with PostgreSQL. + +### Database configurations + +To access databases via ScalarDB Analytics with PostgreSQL, you must set the `scalardbAnalyticsPostgreSQL.databaseProperties` parameter by following the same syntax that you use to configure the `database.properties` file. For details about configurations, see [ScalarDB Configurations](https://github.com/scalar-labs/scalardb/blob/master/docs/configurations.md). + +```yaml +scalardbAnalyticsPostgreSQL: + databaseProperties: | + scalar.db.contact_points=localhost + scalar.db.username=${env:SCALAR_DB_USERNAME:-} + scalar.db.password=${env:SCALAR_DB_PASSWORD:-} + scalar.db.storage=cassandra +``` + +### Database namespaces configurations + +You must set `schemaImporter.namespaces` to all the database namespaces that include tables you want to read via ScalarDB Analytics with PostgreSQL.
+ +```yaml +schemaImporter: + namespaces: + - namespace1 + - namespace2 + - namespace3 +``` + +## Optional configurations + +This section explains the optional configurations when setting up a custom values file for ScalarDB Analytics with PostgreSQL. + +### Resource configurations (recommended in production environments) + +To control pod resources by using requests and limits in Kubernetes, you can use `scalardbAnalyticsPostgreSQL.resources`. + +You can configure requests and limits by using the same syntax as requests and limits in Kubernetes. For more details on requests and limits in Kubernetes, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + +```yaml +scalardbAnalyticsPostgreSQL: + resources: + requests: + cpu: 1000m + memory: 2Gi + limits: + cpu: 2000m + memory: 4Gi +``` + +### Secret configurations (recommended in production environments) + +To use environment variables to set some properties, like credentials, in `scalardbAnalyticsPostgreSQL.databaseProperties`, you can use `scalardbAnalyticsPostgreSQL.secretName` to specify the secret resource that includes some credentials. + +For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) by using environment variables, which makes your pods more secure. + +For more details on how to use a secret resource, see [How to use Secret resources to pass the credentials as the environment variables into the properties file](./use-secret-for-credentials.md). + +```yaml +scalardbAnalyticsPostgreSQL: + secretName: "scalardb-analytics-postgresql-credentials-secret" +``` + +### Affinity configurations (recommended in production environments) + +To control pod deployment by using affinity and anti-affinity in Kubernetes, you can use `scalardbAnalyticsPostgreSQL.affinity`. + +You can configure affinity and anti-affinity by using the same syntax for affinity and anti-affinity in Kubernetes. For more details on configuring affinity in Kubernetes, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). + +```yaml +scalardbAnalyticsPostgreSQL: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - scalardb-analytics-postgresql + - key: app.kubernetes.io/app + operator: In + values: + - scalardb-analytics-postgresql + topologyKey: kubernetes.io/hostname +``` + +### SecurityContext configurations (default value is recommended) + +To set SecurityContext and PodSecurityContext for ScalarDB Analytics with PostgreSQL pods, you can use `scalardbAnalyticsPostgreSQL.securityContext`, `scalardbAnalyticsPostgreSQL.podSecurityContext`, and `schemaImporter.securityContext`. + +You can configure SecurityContext and PodSecurityContext by using the same syntax as SecurityContext and PodSecurityContext in Kubernetes. For more details on the SecurityContext and PodSecurityContext configurations in Kubernetes, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). 
+ +```yaml +scalardbAnalyticsPostgreSQL: + podSecurityContext: + fsGroup: 201 + seccompProfile: + type: RuntimeDefault + securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsUser: 999 + allowPrivilegeEscalation: false + +schemaImporter: + securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + allowPrivilegeEscalation: false +``` + +### Image configurations (default value is recommended) + +If you want to change the image repository, you can use `scalardbAnalyticsPostgreSQL.image.repository` and `schemaImporter.image.repository` to specify the container repository information of the ScalarDB Analytics with PostgreSQL and Schema Importer images that you want to pull. + +```yaml +scalardbAnalyticsPostgreSQL: + image: + repository: + +schemaImporter: + image: + repository: +``` + +### Replica configurations (optional based on your environment) + +You can specify the number of ScalarDB Analytics with PostgreSQL replicas (pods) by using `scalardbAnalyticsPostgreSQL.replicaCount`. + +```yaml +scalardbAnalyticsPostgreSQL: + replicaCount: 3 +``` + +### PostgreSQL database name configuration (optional based on your environment) + +You can specify the database name that you create in PostgreSQL. Schema Importer creates some objects, such as a view of ScalarDB Analytics with PostgreSQL, in this database. + +```yaml +scalardbAnalyticsPostgreSQL: + postgresql: + databaseName: scalardb +``` + +### PostgreSQL superuser password configuration (optional based on your environment) + +You can specify the secret name that includes the superuser password for PostgreSQL. + +```yaml +scalardbAnalyticsPostgreSQL: + postgresql: + secretName: scalardb-analytics-postgresql-superuser-password +``` + +{% capture notice--info %} +**Note** + +You must create a secret resource with this name (`scalardb-analytics-postgresql-superuser-password` by default) before you deploy ScalarDB Analytics with PostgreSQL. For details, see [Prepare a secret resource](./how-to-deploy-scalardb-analytics-postgresql.md#prepare-a-secret-resource). +{% endcapture %} + +
{{ notice--info | markdownify }}
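+ +As a minimal sketch, you could create such a secret resource with `kubectl` as follows. The key name `superuser-password` and the namespace are assumptions here; for the exact secret format that the chart expects, see [Prepare a secret resource](./how-to-deploy-scalardb-analytics-postgresql.md#prepare-a-secret-resource). + +```console +# Create the superuser password secret in the namespace where you deploy ScalarDB Analytics with PostgreSQL. +kubectl create secret generic scalardb-analytics-postgresql-superuser-password --from-literal=superuser-password=<your-superuser-password> -n <namespace> +```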
+ +### Taint and toleration configurations (optional based on your environment) + +If you want to control pod deployment by using taints and tolerations in Kubernetes, you can use `scalardbAnalyticsPostgreSQL.tolerations`. + +You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). + +```yaml +scalardbAnalyticsPostgreSQL: + tolerations: + - effect: NoSchedule + key: scalar-labs.com/dedicated-node + operator: Equal + value: scalardb-analytics-postgresql +``` diff --git a/docs/3.12/helm-charts/configure-custom-values-scalardb-cluster.md b/docs/3.12/helm-charts/configure-custom-values-scalardb-cluster.md new file mode 100644 index 00000000..397d41c2 --- /dev/null +++ b/docs/3.12/helm-charts/configure-custom-values-scalardb-cluster.md @@ -0,0 +1,223 @@ +# Configure a custom values file for ScalarDB Cluster + +This document explains how to create your custom values file for the ScalarDB Cluster chart. For details on the parameters, see the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardb-cluster/README.md) of the ScalarDB Cluster chart. + +## Required configurations + +### Image configurations + +You must set `scalardbCluster.image.repository`. Be sure to specify the ScalarDB Cluster container image so that you can pull the image from the container repository. + +```yaml +scalardbCluster: + image: + repository: +``` + +### Database configurations + +You must set `scalardbCluster.scalardbClusterNodeProperties`. Please set `scalardb-cluster-node.properties` to this parameter. For more details on the configurations of ScalarDB Cluster, see [ScalarDB Cluster Configurations](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/scalardb-cluster-configurations.md). + +```yaml +scalardbCluster: + scalardbClusterNodeProperties: | + scalar.db.cluster.membership.type=KUBERNETES + scalar.db.cluster.membership.kubernetes.endpoint.namespace_name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAMESPACE_NAME} + scalar.db.cluster.membership.kubernetes.endpoint.name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAME} + scalar.db.contact_points=localhost + scalar.db.username=${env:SCALAR_DB_USERNAME} + scalar.db.password=${env:SCALAR_DB_PASSWORD} + scalar.db.storage=cassandra +``` + +Note that you must always set the following three properties if you deploy ScalarDB Cluster in a Kubernetes environment by using Scalar Helm Chart. These properties are fixed values. Since the properties don't depend on individual environments, you can set the same values by copying the following values and pasting them in `scalardbCluster.scalardbClusterNodeProperties`. + +```yaml +scalardbCluster: + scalardbClusterNodeProperties: | + scalar.db.cluster.membership.type=KUBERNETES + scalar.db.cluster.membership.kubernetes.endpoint.namespace_name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAMESPACE_NAME} + scalar.db.cluster.membership.kubernetes.endpoint.name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAME} +``` + +## Optional configurations + +### Resource configurations (recommended in production environments) + +To control pod resources by using requests and limits in Kubernetes, you can use `scalardbCluster.resources`. 
+ +Note that, for commercial licenses, the resources for each pod of Scalar products are limited to 2vCPU / 4GB memory. Also, if you use the pay-as-you-go containers that the AWS Marketplace provides, you will not be able to run any containers that exceed the 2vCPU / 4GB memory configuration in `resources.limits`. If you exceed this resource limitation, the pods will automatically stop. + +You can configure requests and limits by using the same syntax as requests and limits in Kubernetes. For more details on requests and limits in Kubernetes, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + +```yaml +scalardbCluster: + resources: + requests: + cpu: 2000m + memory: 4Gi + limits: + cpu: 2000m + memory: 4Gi +``` + +### Secret configurations (recommended in production environments) + +To use environment variables to set some properties (e.g., credentials) in `scalardbCluster.scalardbClusterNodeProperties`, you can use `scalardbCluster.secretName` to specify the Secret resource that includes some credentials. + +For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) by using environment variables, which makes your pods more secure. + +For more details on how to use a Secret resource, see [How to use Secret resources to pass the credentials as the environment variables into the properties file](./use-secret-for-credentials.md). + +```yaml +scalardbCluster: + secretName: "scalardb-cluster-credentials-secret" +``` + +### Affinity configurations (recommended in production environments) + +To control pod deployment by using affinity and anti-affinity in Kubernetes, you can use `scalardbCluster.affinity`. + +You can configure affinity and anti-affinity by using the same syntax for affinity and anti-affinity in Kubernetes. For more details on configuring affinity in Kubernetes, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). + +```yaml +scalardbCluster: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - scalardb-cluster + - key: app.kubernetes.io/app + operator: In + values: + - scalardb-cluster + topologyKey: kubernetes.io/hostname + weight: 50 +``` + +### Prometheus and Grafana configurations (recommended in production environments) + +To monitor ScalarDB Cluster pods by using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can set `scalardbCluster.grafanaDashboard.enabled`, `scalardbCluster.serviceMonitor.enabled`, and `scalardbCluster.prometheusRule.enabled` to `true`. When you set these configurations to `true`, the chart deploys the necessary resources and kube-prometheus-stack starts monitoring automatically. + +```yaml +scalardbCluster: + grafanaDashboard: + enabled: true + namespace: monitoring + serviceMonitor: + enabled: true + namespace: monitoring + interval: 15s + prometheusRule: + enabled: true + namespace: monitoring +``` + +### SecurityContext configurations (default value is recommended) + +To set SecurityContext and PodSecurityContext for ScalarDB Cluster pods, you can use `scalardbCluster.securityContext` and `scalardbCluster.podSecurityContext`. 
+ +You can configure SecurityContext and PodSecurityContext by using the same syntax as SecurityContext and PodSecurityContext in Kubernetes. For more details on the SecurityContext and PodSecurityContext configurations in Kubernetes, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). + +```yaml +scalardbCluster: + podSecurityContext: + seccompProfile: + type: RuntimeDefault + securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + allowPrivilegeEscalation: false +``` + +### Replica configurations (optional based on your environment) + +You can specify the number of ScalarDB Cluster replicas (pods) by using `scalardbCluster.replicaCount`. + +```yaml +scalardbCluster: + replicaCount: 3 +``` + +### Logging configurations (optional based on your environment) + +To change the ScalarDB Cluster log level, you can use `scalardbCluster.logLevel`. + +```yaml +scalardbCluster: + logLevel: INFO +``` + +### GraphQL configurations (optional based on your environment) + +To use the GraphQL feature in ScalarDB Cluster, you can set `scalardbCluster.graphql.enabled` to `true` to deploy some resources for the GraphQL feature. Note that you also need to set `scalar.db.graphql.enabled=true` in `scalardbCluster.scalardbClusterNodeProperties` when using the GraphQL feature. + +```yaml +scalardbCluster: + graphql: + enabled: true +``` + +Also, you can configure the `Service` resource that accepts GraphQL requests from clients. + +```yaml +scalardbCluster: + graphql: + service: + type: ClusterIP + annotations: {} + ports: + graphql: + port: 8080 + targetPort: 8080 + protocol: TCP +``` + +### SQL configurations (optional based on your environment) + +To use the SQL feature in ScalarDB Cluster, there is no configuration necessary for custom values files. You can use the feature by setting `scalar.db.sql.enabled=true` in `scalardbCluster.scalardbClusterNodeProperties`. + +### Scalar Envoy configurations (optional based on your environment) + +To use ScalarDB Cluster with `indirect` mode, you must enable Envoy as follows. + +```yaml +envoy: + enabled: true +``` + +Also, you must set the Scalar Envoy configurations in the custom values file for ScalarDB Cluster. This is because clients need to send requests to ScalarDB Cluster via Scalar Envoy as the load balancer of gRPC requests if you deploy ScalarDB Cluster in a Kubernetes environment with `indirect` mode. + +For more details on Scalar Envoy configurations, see [Configure a custom values file for Scalar Envoy](configure-custom-values-envoy.md). + +```yaml +envoy: + configurationsForScalarEnvoy: + ... + +scalardbCluster: + configurationsForScalarDbCluster: + ... +``` + +### Taint and toleration configurations (optional based on your environment) + +If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `scalardbCluster.tolerations`. + +You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). 
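+ +For example, the toleration shown below assumes that you have already added a matching taint to the nodes that you want to dedicate to ScalarDB Cluster, as in the following sketch. The node name is a placeholder. + +```console
+# Taint a node so that only pods with a matching toleration are scheduled on it.
+kubectl taint nodes <your-node-name> scalar-labs.com/dedicated-node=scalardb-cluster:NoSchedule
+```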
+ +```yaml +scalardbCluster: + tolerations: + - effect: NoSchedule + key: scalar-labs.com/dedicated-node + operator: Equal + value: scalardb-cluster +``` diff --git a/docs/3.12/helm-charts/configure-custom-values-scalardb-graphql.md b/docs/3.12/helm-charts/configure-custom-values-scalardb-graphql.md new file mode 100644 index 00000000..b001c562 --- /dev/null +++ b/docs/3.12/helm-charts/configure-custom-values-scalardb-graphql.md @@ -0,0 +1,219 @@ +# [Deprecated] Configure a custom values file for ScalarDB GraphQL + +{% capture notice--info %} +**Note** + +ScalarDB GraphQL Server is now deprecated. Please use [ScalarDB Cluster](./configure-custom-values-scalardb-cluster.md) instead. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +This document explains how to create your custom values file for the ScalarDB GraphQL chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardb-graphql/README.md) of the ScalarDB GraphQL chart. + +## Required configurations + +### Ingress configuration + +You must set `ingress` to listen the client requests. When you deploy multiple GraphQL servers, session affinity is required to handle transactions properly. This is because GraphQL servers keep the transactions in memory, so GraphQL queries that use continued transactions must be routed to the same server that started the transaction. + +For example, if you use NGINX Ingress Controller, you can set ingress configurations as follows. + +```yaml +ingress: + enabled: true + className: nginx + annotations: + nginx.ingress.kubernetes.io/session-cookie-path: / + nginx.ingress.kubernetes.io/affinity: cookie + nginx.ingress.kubernetes.io/session-cookie-name: INGRESSCOOKIE + nginx.ingress.kubernetes.io/session-cookie-hash: sha1 + nginx.ingress.kubernetes.io/session-cookie-max-age: "300" + hosts: + - host: "" + paths: + - path: /graphql + pathType: Exact +``` + +If you use ALB of AWS, you can set ingress configurations as follows. + +```yaml +ingress: + enabled: true + className: alb + annotations: + alb.ingress.kubernetes.io/scheme: internal + alb.ingress.kubernetes.io/target-group-attributes: stickiness.enabled=true,stickiness.lb_cookie.duration_seconds=60 + alb.ingress.kubernetes.io/target-type: ip + alb.ingress.kubernetes.io/healthcheck-path: /graphql?query=%7B__typename%7D + hosts: + - host: "" + paths: + - path: /graphql + pathType: Exact +``` + +### Image configurations + +You must set `image.repository`. Be sure to specify the ScalarDB GraphQL container image so that you can pull the image from the container repository. + +```yaml +image: + repository: +``` + +If you're using AWS or Azure, please refer to the following documents for more details: + +* [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md) +* [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md) + +### Database configurations + +You must set `scalarDbGraphQlConfiguration`. + +If you use ScalarDB Server with ScalarDB GraphQL (recommended), you must set the configuration to access the ScalarDB Server pods. + +```yaml +scalarDbGraphQlConfiguration: + contactPoints: + contactPort: 60051 + storage: "grpc" + transactionManager: "grpc" + namespaces: +``` + +## Optional configurations + +### Resource configurations (Recommended in the production environment) + +If you want to control pod resources using the requests and limits of Kubernetes, you can use `resources`. + +Note that the resources for one pod of Scalar products are limited to 2vCPU / 4GB memory from the perspective of the commercial license. Also, when you get the pay-as-you-go containers provided from AWS Marketplace, you cannot run those containers with more than 2vCPU / 4GB memory configuration in the `resources.limits`. When you exceed this limitation, pods are automatically stopped. + +You can configure them using the same syntax as the requests and limits of Kubernetes. 
So, please refer to the official document [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details on the requests and limits of Kubernetes. + +```yaml +resources: + requests: + cpu: 2000m + memory: 4Gi + limits: + cpu: 2000m + memory: 4Gi +``` + +### Affinity configurations (Recommended in the production environment) + +If you want to control pod deployment using the affinity and anti-affinity of Kubernetes, you can use `affinity`. + +You can configure them using the same syntax as the affinity of Kubernetes. So, please refer to the official document [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more details on the affinity configuration of Kubernetes. + +```yaml +affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/app + operator: In + values: + - scalardb-graphql + topologyKey: kubernetes.io/hostname + weight: 50 +``` + +### Prometheus/Grafana configurations (Recommended in the production environment) + +If you want to monitor ScalarDB GraphQL pods using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can deploy a ConfigMap, a ServiceMonitor, and a PrometheusRule resource for kube-prometheus-stack using `grafanaDashboard.enabled`, `serviceMonitor.enabled`, and `prometheusRule.enabled`. + +```yaml +grafanaDashboard: + enabled: true + namespace: monitoring +serviceMonitor: + enabled: true + namespace: monitoring + interval: 15s +prometheusRule: + enabled: true + namespace: monitoring +``` + +### SecurityContext configurations (Default value is recommended) + +If you want to set SecurityContext and PodSecurityContext for ScalarDB GraphQL pods, you can use `securityContext` and `podSecurityContext`. + +You can configure them using the same syntax as SecurityContext and PodSecurityContext of Kubernetes. So, please refer to the official document [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for more details on the SecurityContext and PodSecurityContext configurations of Kubernetes. + +```yaml +podSecurityContext: + seccompProfile: + type: RuntimeDefault + +securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + allowPrivilegeEscalation: false +``` + +### GraphQL Server configurations (Optional based on your environment) + +If you want to change the path to run the graphql queries, you can use `scalarDbGraphQlConfiguration.path`. By default, you can run the graphql queries using `http://:80/graphql`. + +You can also enable/disable [GraphiQL](https://github.com/graphql/graphiql/tree/main/packages/graphiql) using `scalarDbGraphQlConfiguration.graphiql`. + +```yaml +scalarDbGraphQlConfiguration: + path: /graphql + graphiql: "true" +``` + +### TLS configurations (Optional based on your environment) + +If you want to use TLS between the client and the ingress, you can use `ingress.tls`. + +You must create a Secret resource that includes a secret key and a certificate file. Please refer to the official document [Ingress - TLS](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) for more details on the Secret resource for Ingress. 
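+ +For example, you could create the Secret resource from an existing certificate and private key by using `kubectl`, as in the following sketch. The file paths and namespace are placeholders, and the secret name must match `ingress.tls.secretName` in the configuration below. + +```console
+# Create a TLS secret from your certificate and private key files.
+kubectl create secret tls graphql-ingress-tls --cert=path/to/tls.crt --key=path/to/tls.key -n <namespace>
+```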
+ +```yaml +ingress: + tls: + - hosts: + - foo.example.com + - bar.example.com + - bax.example.com + secretName: graphql-ingress-tls +``` + +### Replica configurations (Optional based on your environment) + +You can specify the number of replicas (pods) of ScalarDB GraphQL using `replicaCount`. + +```yaml +replicaCount: 3 +``` + +### Logging configurations (Optional based on your environment) + +If you want to change the log level of ScalarDB GraphQL, you can use `scalarDbGraphQlConfiguration.logLevel`. + +```yaml +scalarDbGraphQlConfiguration: + logLevel: INFO +``` + +### Taint and toleration configurations (Optional based on your environment) + +If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `tolerations`. + +You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). + +```yaml +tolerations: + - effect: NoSchedule + key: scalar-labs.com/dedicated-node + operator: Equal + value: scalardb +``` diff --git a/docs/3.12/helm-charts/configure-custom-values-scalardb.md b/docs/3.12/helm-charts/configure-custom-values-scalardb.md new file mode 100644 index 00000000..55f2fb48 --- /dev/null +++ b/docs/3.12/helm-charts/configure-custom-values-scalardb.md @@ -0,0 +1,196 @@ +# [Deprecated] Configure a custom values file for ScalarDB Server + +{% capture notice--info %} +**Note** + +ScalarDB Server is now deprecated. Please use [ScalarDB Cluster](./configure-custom-values-scalardb-cluster.md) instead. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +This document explains how to create your custom values file for the ScalarDB Server chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardb/README.md) of the ScalarDB Server chart. + +## Required configurations + +### Scalar Envoy configurations + +You must set the Scalar Envoy configurations in the custom values file for ScalarDB Server. This is because client requests are sent to ScalarDB Server via Scalar Envoy as the load balancer of gRPC requests if you deploy ScalarDB Server on a Kubernetes environment. + +Please refer to the document [Configure a custom values file for Scalar Envoy](configure-custom-values-envoy.md) for more details on the Scalar Envoy configurations. + +```yaml +envoy: + configurationsForScalarEnvoy: + ... + +scalardb: + configurationsForScalarDB: + ... +``` + +### Image configurations + +You must set `scalardb.image.repository`. Be sure to specify the ScalarDB Server container image so that you can pull the image from the container repository. + +```yaml +scalardb: + image: + repository: +``` + +If you're using AWS or Azure, please refer to the following documents for more details: + +* [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md) +* [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md) + +### Database configurations + +You must set `scalardb.databaseProperties`. Please set your `database.properties` to this parameter. Please refer to the [Configure ScalarDB Server](https://github.com/scalar-labs/scalardb/blob/master/docs/scalardb-server.md#configure-scalardb-server) for more details on the configuration of ScalarDB Server. + +```yaml +scalardb: + databaseProperties: | + scalar.db.server.port=60051 + scalar.db.server.prometheus_exporter_port=8080 + scalar.db.server.grpc.max_inbound_message_size= + scalar.db.server.grpc.max_inbound_metadata_size= + scalar.db.contact_points=localhost + scalar.db.username=cassandra + scalar.db.password=cassandra + scalar.db.storage=cassandra + scalar.db.transaction_manager=consensus-commit + scalar.db.consensus_commit.isolation_level=SNAPSHOT + scalar.db.consensus_commit.serializable_strategy= + scalar.db.consensus_commit.include_metadata.enabled=false +``` + +## Optional configurations + +### Resource configurations (Recommended in the production environment) + +If you want to control pod resources using the requests and limits of Kubernetes, you can use `scalardb.resources`. + +Note that the resources for one pod of Scalar products are limited to 2vCPU / 4GB memory from the perspective of the commercial license. Also, when you get the pay-as-you-go containers provided from AWS Marketplace, you cannot run those containers with more than 2vCPU / 4GB memory configuration in the `resources.limits`. When you exceed this limitation, pods are automatically stopped. + +You can configure them using the same syntax as the requests and limits of Kubernetes. So, please refer to the official document [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details on the requests and limits of Kubernetes. 
+ +```yaml +scalardb: + resources: + requests: + cpu: 2000m + memory: 4Gi + limits: + cpu: 2000m + memory: 4Gi +``` + +### Secret configurations (Recommended in the production environment) + +If you want to use environment variables to set some properties (e.g., credentials) in the `scalardb.databaseProperties`, you can use `scalardb.secretName` to specify the Secret resource that includes some credentials. + +For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) using environment variables, which makes your pods more secure. + +Please refer to the document [How to use Secret resources to pass the credentials as the environment variables into the properties file](./use-secret-for-credentials.md) for more details on how to use a Secret resource. + +```yaml +scalardb: + secretName: "scalardb-credentials-secret" +``` + +### Affinity configurations (Recommended in the production environment) + +If you want to control pod deployment using the affinity and anti-affinity of Kubernetes, you can use `scalardb.affinity`. + +You can configure them using the same syntax as the affinity of Kubernetes. So, please refer to the official document [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more details on the affinity configuration of Kubernetes. + +```yaml +scalardb: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - scalardb + - key: app.kubernetes.io/app + operator: In + values: + - scalardb + topologyKey: kubernetes.io/hostname + weight: 50 +``` + +### Prometheus/Grafana configurations (Recommended in the production environment) + +If you want to monitor ScalarDB Server pods using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can deploy a ConfigMap, a ServiceMonitor, and a PrometheusRule resource for kube-prometheus-stack using `scalardb.grafanaDashboard.enabled`, `scalardb.serviceMonitor.enabled`, and `scalardb.prometheusRule.enabled`. + +```yaml +scalardb: + grafanaDashboard: + enabled: true + namespace: monitoring + serviceMonitor: + enabled: true + namespace: monitoring + interval: 15s + prometheusRule: + enabled: true + namespace: monitoring +``` + +### SecurityContext configurations (Default value is recommended) + +If you want to set SecurityContext and PodSecurityContext for ScalarDB Server pods, you can use `scalardb.securityContext` and `scalardb.podSecurityContext`. + +You can configure them using the same syntax as SecurityContext and PodSecurityContext of Kubernetes. So, please refer to the official document [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for more details on the SecurityContext and PodSecurityContext configurations of Kubernetes. + +```yaml +scalardb: + podSecurityContext: + seccompProfile: + type: RuntimeDefault + securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + allowPrivilegeEscalation: false +``` + +### Replica configurations (Optional based on your environment) + +You can specify the number of replicas (pods) of ScalarDB Server using `scalardb.replicaCount`. 
+ +```yaml +scalardb: + replicaCount: 3 +``` + +### Logging configurations (Optional based on your environment) + +If you want to change the log level of ScalarDB Server, you can use `scalardb.storageConfiguration.dbLogLevel`. + +```yaml +scalardb: + storageConfiguration: + dbLogLevel: INFO +``` + +### Taint and toleration configurations (Optional based on your environment) + +If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `scalardb.tolerations`. + +You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). + +```yaml +scalardb: + tolerations: + - effect: NoSchedule + key: scalar-labs.com/dedicated-node + operator: Equal + value: scalardb +``` diff --git a/docs/3.12/helm-charts/configure-custom-values-scalardl-auditor.md b/docs/3.12/helm-charts/configure-custom-values-scalardl-auditor.md new file mode 100644 index 00000000..39543bc9 --- /dev/null +++ b/docs/3.12/helm-charts/configure-custom-values-scalardl-auditor.md @@ -0,0 +1,191 @@ +# Configure a custom values file for ScalarDL Auditor + +This document explains how to create your custom values file for the ScalarDL Auditor chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardl-audit/README.md) of the ScalarDL Auditor chart. + +## Required configurations + +### Scalar Envoy configurations + +You must set the Scalar Envoy configurations in the custom values file for ScalarDL Auditor. This is because client requests are sent to ScalarDL Auditor via Scalar Envoy as the load balancer of gRPC requests if you deploy ScalarDL Auditor on a Kubernetes environment. + +Please refer to the document [Configure a custom values file for Scalar Envoy](configure-custom-values-envoy.md) for more details on the Scalar Envoy configurations. + +```yaml +envoy: + configurationsForScalarEnvoy: + ... + +auditor: + configurationsForScalarDLAuditor: + ... +``` + +### Image configurations + +You must set `auditor.image.repository`. Be sure to specify the ScalarDL Auditor container image so that you can pull the image from the container repository. + +```yaml +auditor: + image: + repository: +``` + +If you're using AWS or Azure, please refer to the following documents for more details: + +* [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md) +* [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md) + +### Auditor/Database configurations + +You must set `auditor.auditorProperties`. Please set your `auditor.properties` to this parameter. Please refer to the [auditor.properties](https://github.com/scalar-labs/scalar/blob/master/auditor/conf/auditor.properties) for more details on the configuration of ScalarDL Auditor. 
+ +```yaml +auditor: + auditorProperties: | + scalar.db.contact_points=localhost + scalar.db.username=cassandra + scalar.db.password=cassandra + scalar.db.storage=cassandra + scalar.dl.auditor.ledger.host= + scalar.dl.auditor.private_key_path=/keys/auditor-key-file + scalar.dl.auditor.cert_path=/keys/auditor-cert-file +``` + +### Key/Certificate configurations + +You must set a private key file to `scalar.dl.auditor.private_key_path` and a certificate file to `scalar.dl.auditor.cert_path`. + +You must also mount the private key file and the certificate file on the ScalarDL Auditor pod. + +For more details on how to mount the private key file and the certificate file, refer to [Mount key and certificate files on a pod in ScalarDL Helm Charts](./mount-files-or-volumes-on-scalar-pods.md#mount-key-and-certificate-files-on-a-pod-in-scalardl-helm-charts). + +## Optional configurations + +### Resource configurations (Recommended in the production environment) + +If you want to control pod resources using the requests and limits of Kubernetes, you can use `auditor.resources`. + +Note that the resources for one pod of Scalar products are limited to 2vCPU / 4GB memory from the perspective of the commercial license. Also, when you get the pay-as-you-go containers provided from AWS Marketplace, you cannot run those containers with more than 2vCPU / 4GB memory configuration in the `resources.limits`. When you exceed this limitation, pods are automatically stopped. + +You can configure them using the same syntax as the requests and limits of Kubernetes. So, please refer to the official document [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details on the requests and limits of Kubernetes. + +```yaml +auditor: + resources: + requests: + cpu: 2000m + memory: 4Gi + limits: + cpu: 2000m + memory: 4Gi +``` + +### Secret configurations + +If you want to use environment variables to set some properties (e.g., credentials) in the `auditor.auditorProperties`, you can use `auditor.secretName` to specify the Secret resource that includes some credentials. + +For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) using environment variables, which makes your pods more secure. + +Please refer to the document [How to use Secret resources to pass the credentials as the environment variables into the properties file](./use-secret-for-credentials.md) for more details on how to use a Secret resource. + +```yaml +auditor: + secretName: "auditor-credentials-secret" +``` + +### Affinity configurations (Recommended in the production environment) + +If you want to control pod deployment using the affinity and anti-affinity of Kubernetes, you can use `auditor.affinity`. + +You can configure them using the same syntax as the affinity of Kubernetes. So, please refer to the official document [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more details on the affinity configuration of Kubernetes. 
+ +```yaml +auditor: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - scalardl-audit + - key: app.kubernetes.io/app + operator: In + values: + - auditor + topologyKey: kubernetes.io/hostname + weight: 50 +``` + +### Prometheus/Grafana configurations (Recommended in the production environment) + +If you want to monitor ScalarDL Auditor pods using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can deploy a ConfigMap, a ServiceMonitor, and a PrometheusRule resource for kube-prometheus-stack using `auditor.grafanaDashboard.enabled`, `auditor.serviceMonitor.enabled`, and `auditor.prometheusRule.enabled`. + +```yaml +auditor: + grafanaDashboard: + enabled: true + namespace: monitoring + serviceMonitor: + enabled: true + namespace: monitoring + interval: 15s + prometheusRule: + enabled: true + namespace: monitoring +``` + +### SecurityContext configurations (Default value is recommended) + +If you want to set SecurityContext and PodSecurityContext for ScalarDL Auditor pods, you can use `auditor.securityContext` and `auditor.podSecurityContext`. + +You can configure them using the same syntax as SecurityContext and PodSecurityContext of Kubernetes. So, please refer to the official document [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for more details on the SecurityContext and PodSecurityContext configurations of Kubernetes. + +```yaml +auditor: + podSecurityContext: + seccompProfile: + type: RuntimeDefault + securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + allowPrivilegeEscalation: false +``` + +### Replica configurations (Optional based on your environment) + +You can specify the number of replicas (pods) of ScalarDL Auditor using `auditor.replicaCount`. + +```yaml +auditor: + replicaCount: 3 +``` + +### Logging configurations (Optional based on your environment) + +If you want to change the log level of ScalarDL Auditor, you can use `auditor.scalarAuditorConfiguration.auditorLogLevel`. + +```yaml +auditor: + scalarAuditorConfiguration: + auditorLogLevel: INFO +``` + +### Taint and toleration configurations (Optional based on your environment) + +If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `auditor.tolerations`. + +You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). + +```yaml +auditor: + tolerations: + - effect: NoSchedule + key: scalar-labs.com/dedicated-node + operator: Equal + value: scalardl-auditor +``` diff --git a/docs/3.12/helm-charts/configure-custom-values-scalardl-ledger.md b/docs/3.12/helm-charts/configure-custom-values-scalardl-ledger.md new file mode 100644 index 00000000..3b06fd50 --- /dev/null +++ b/docs/3.12/helm-charts/configure-custom-values-scalardl-ledger.md @@ -0,0 +1,191 @@ +# Configure a custom values file for ScalarDL Ledger + +This document explains how to create your custom values file for the ScalarDL Ledger chart. 
If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/scalardl/README.md) of the ScalarDL Ledger chart. + +## Required configurations + +### Scalar Envoy configurations + +You must set the Scalar Envoy configurations in the custom values file for ScalarDL Ledger. This is because client requests are sent to ScalarDL Ledger via Scalar Envoy as the load balancer of gRPC requests if you deploy ScalarDL Ledger on a Kubernetes environment. + +Please refer to the document [Configure a custom values file for Scalar Envoy](configure-custom-values-envoy.md) for more details on the Scalar Envoy configurations. + +```yaml +envoy: + configurationsForScalarEnvoy: + ... + +ledger: + configurationsForScalarDLLedger: + ... +``` + +### Image configurations + +You must set `ledger.image.repository`. Be sure to specify the ScalarDL Ledger container image so that you can pull the image from the container repository. + +```yaml +ledger: + image: + repository: +``` + +If you're using AWS or Azure, please refer to the following documents for more details: + +* [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md) +* [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md) + +### Ledger/Database configurations + +You must set `ledger.ledgerProperties`. Please set your `ledger.properties` to this parameter. Please refer to the [ledger.properties](https://github.com/scalar-labs/scalar/blob/master/ledger/conf/ledger.properties) for more details on the configuration of ScalarDL Ledger. + +```yaml +ledger: + ledgerProperties: | + scalar.db.contact_points=localhost + scalar.db.username=cassandra + scalar.db.password=cassandra + scalar.db.storage=cassandra + scalar.dl.ledger.proof.enabled=true + scalar.dl.ledger.auditor.enabled=true + scalar.dl.ledger.proof.private_key_path=/keys/ledger-key-file +``` + +### Key/Certificate configurations + +If you set `scalar.dl.ledger.proof.enabled` to `true` (this configuration is required if you use ScalarDL Auditor), you must set a private key file to `scalar.dl.ledger.proof.private_key_path`. + +In this case, you must mount the private key file on the ScalarDL Ledger pod. + +For more details on how to mount the private key file, refer to [Mount key and certificate files on a pod in ScalarDL Helm Charts](./mount-files-or-volumes-on-scalar-pods.md#mount-key-and-certificate-files-on-a-pod-in-scalardl-helm-charts). + +## Optional configurations + +### Resource configurations (Recommended in the production environment) + +If you want to control pod resources using the requests and limits of Kubernetes, you can use `ledger.resources`. + +Note that the resources for one pod of Scalar products are limited to 2vCPU / 4GB memory from the perspective of the commercial license. Also, when you get the pay-as-you-go containers provided from AWS Marketplace, you cannot run those containers with more than 2vCPU / 4GB memory configuration in the `resources.limits`. When you exceed this limitation, pods are automatically stopped. + +You can configure them using the same syntax as the requests and limits of Kubernetes. 
So, please refer to the official document [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details on the requests and limits of Kubernetes. + +```yaml +ledger: + resources: + requests: + cpu: 2000m + memory: 4Gi + limits: + cpu: 2000m + memory: 4Gi +``` + +### Secret configurations (Recommended in the production environment) + +If you want to use environment variables to set some properties (e.g., credentials) in the `ledger.ledgerProperties`, you can use `ledger.secretName` to specify the Secret resource that includes some credentials. + +For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) using environment variables, which makes your pods more secure. + +Please refer to the document [How to use Secret resources to pass the credentials as the environment variables into the properties file](./use-secret-for-credentials.md) for more details on how to use a Secret resource. + +```yaml +ledger: + secretName: "ledger-credentials-secret" +``` + +### Affinity configurations (Recommended in the production environment) + +If you want to control pod deployment using the affinity and anti-affinity of Kubernetes, you can use `ledger.affinity`. + +You can configure them using the same syntax as the affinity of Kubernetes. So, please refer to the official document [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more details on the affinity configuration of Kubernetes. + +```yaml +ledger: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - scalardl + - key: app.kubernetes.io/app + operator: In + values: + - ledger + topologyKey: kubernetes.io/hostname + weight: 50 +``` + +### Prometheus/Grafana configurations (Recommended in the production environment) + +If you want to monitor ScalarDL Ledger pods using [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), you can deploy a ConfigMap, a ServiceMonitor, and a PrometheusRule resource for kube-prometheus-stack using `ledger.grafanaDashboard.enabled`, `ledger.serviceMonitor.enabled`, and `ledger.prometheusRule.enabled`. + +```yaml +ledger: + grafanaDashboard: + enabled: true + namespace: monitoring + serviceMonitor: + enabled: true + namespace: monitoring + interval: 15s + prometheusRule: + enabled: true + namespace: monitoring +``` + +### SecurityContext configurations (Default value is recommended) + +If you want to set SecurityContext and PodSecurityContext for ScalarDL Ledger pods, you can use `ledger.securityContext` and `ledger.podSecurityContext`. + +You can configure them using the same syntax as SecurityContext and PodSecurityContext of Kubernetes. So, please refer to the official document [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for more details on the SecurityContext and PodSecurityContext configurations of Kubernetes. 
+ +```yaml +ledger: + podSecurityContext: + seccompProfile: + type: RuntimeDefault + securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + allowPrivilegeEscalation: false +``` + +### Replica configurations (Optional based on your environment) + +You can specify the number of replicas (pods) of ScalarDL Ledger using `ledger.replicaCount`. + +```yaml +ledger: + replicaCount: 3 +``` + +### Logging configurations (Optional based on your environment) + +If you want to change the log level of ScalarDL Ledger, you can use `ledger.scalarLedgerConfiguration.ledgerLogLevel`. + +```yaml +ledger: + scalarLedgerConfiguration: + ledgerLogLevel: INFO +``` + +### Taint and toleration configurations (Optional based on your environment) + +If you want to control pod deployment by using the taints and tolerations in Kubernetes, you can use `ledger.tolerations`. + +You can configure taints and tolerations by using the same syntax as the tolerations in Kubernetes. For details on configuring tolerations in Kubernetes, see the official Kubernetes documentation [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). + +```yaml +ledger: + tolerations: + - effect: NoSchedule + key: scalar-labs.com/dedicated-node + operator: Equal + value: scalardl-ledger +``` diff --git a/docs/3.12/helm-charts/configure-custom-values-scalardl-schema-loader.md b/docs/3.12/helm-charts/configure-custom-values-scalardl-schema-loader.md new file mode 100644 index 00000000..655448d7 --- /dev/null +++ b/docs/3.12/helm-charts/configure-custom-values-scalardl-schema-loader.md @@ -0,0 +1,89 @@ +# Configure a custom values file for ScalarDL Schema Loader + +This document explains how to create your custom values file for the ScalarDL Schema Loader chart. If you want to know the details of the parameters, please refer to the [README](https://github.com/scalar-labs/helm-charts/blob/main/charts/schema-loading/README.md) of the ScalarDL Schema Loader chart. + +## Required configurations + +### Image configurations + +You must set `schemaLoading.image.repository`. Be sure to specify the ScalarDL Schema Loader container image so that you can pull the image from the container repository. + +```yaml +schemaLoading: + image: + repository: +``` + +If you're using AWS or Azure, please refer to the following documents for more details: + +* [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md) +* [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md) + +### Database configurations + +You must set `schemaLoading.databaseProperties`. Please set your `database.properties` to access the backend database to this parameter. Please refer to the [Getting Started with ScalarDB](https://github.com/scalar-labs/scalardb/blob/master/docs/getting-started-with-scalardb.md) for more details on the database configuration of ScalarDB. + +```yaml +schemaLoading: + databaseProperties: | + scalar.db.contact_points=cassandra + scalar.db.contact_port=9042 + scalar.db.username=cassandra + scalar.db.password=cassandra + scalar.db.storage=cassandra +``` + +### Schema type configurations + +You must set `schemaLoading.schemaType`. + +If you create the schema of ScalarDL Ledger, please set `ledger`. + +```yaml +schemaLoading: + schemaType: ledger +``` + +If you create the schema of ScalarDL Auditor, please set `auditor`. 
+ +```yaml +schemaLoading: + schemaType: auditor +``` + +## Optional configurations + +### Secret configurations (Recommended in the production environment) + +If you want to use environment variables to set some properties (e.g., credentials) in the `schemaLoading.databaseProperties`, you can use `schemaLoading.secretName` to specify the Secret resource that includes some credentials. + +For example, you can set credentials for a backend database (`scalar.db.username` and `scalar.db.password`) using environment variables, which makes your pods more secure. + +Please refer to the document [How to use Secret resources to pass the credentials as the environment variables into the properties file](./use-secret-for-credentials.md) for more details on how to use a Secret resource. + +```yaml +schemaLoading: + secretName: "schema-loader-credentials-secret" +``` + +### Flags configurations (Optional based on your environment) + +You can specify several flags as an array. Please refer to the document [ScalarDB Schema Loader](https://github.com/scalar-labs/scalardb/blob/master/docs/schema-loader.md) for more details on the flags. + +```yaml +schemaLoading: + commandArgs: + - "--alter" + - "--compaction-strategy" + - "" + - "--delete-all" + - "--no-backup" + - "--no-scaling" + - "--repair-all" + - "--replication-factor" + - "" + - "--replication-strategy" + - "" + - "--ru" + - "" +``` diff --git a/docs/3.12/helm-charts/getting-started-logging.md b/docs/3.12/helm-charts/getting-started-logging.md new file mode 100644 index 00000000..5f5aa2f4 --- /dev/null +++ b/docs/3.12/helm-charts/getting-started-logging.md @@ -0,0 +1,96 @@ +# Getting Started with Helm Charts (Logging using Loki Stack) + +This document explains how to get started with log aggregation for Scalar products on Kubernetes using Grafana Loki (with Promtail). + +We assume that you have already read the [getting-started with monitoring](./getting-started-monitoring.md) for Scalar products and installed kube-prometheus-stack. + +## What we create + +We will deploy the following components on a Kubernetes cluster as follows. + +``` ++--------------------------------------------------------------------------------------------------+ +| +------------------------------------+ | +| | loki-stack | | +| | | +-----------------+ | +| | +--------------+ +--------------+ | <-----------------(Log)-------------- | Scalar Products | | +| | | Loki | | Promtail | | | | | +| | +--------------+ +--------------+ | | +-----------+ | | +| +------------------------------------+ | | ScalarDB | | | +| | +-----------+ | | +| +------------------------------------------------------+ | | | +| | kube-prometheus-stack | | +-----------+ | | +| | | | | ScalarDL | | | +| | +--------------+ +--------------+ +--------------+ | -----(Monitor)----> | +-----------+ | | +| | | Prometheus | | Alertmanager | | Grafana | | +-----------------+ | +| | +-------+------+ +------+-------+ +------+-------+ | | +| | | | | | | +| | +----------------+-----------------+ | | +| | | | | +| +--------------------------+---------------------------+ | +| | | +| | Kubernetes | ++----------------------------+---------------------------------------------------------------------+ + | <- expose to localhost (127.0.0.1) or use load balancer etc to access + | + (Access Dashboard through HTTP) + | + +----+----+ + | Browser | + +---------+ +``` + +## Step 1. Prepare a custom values file + +1. 
Get the sample file [scalar-loki-stack-custom-values.yaml](./conf/scalar-loki-stack-custom-values.yaml) for the `loki-stack` helm chart. + +## Step 2. Deploy `loki-stack` + +1. Add the `grafana` helm repository. + + ```console + helm repo add grafana https://grafana.github.io/helm-charts + ``` + +1. Deploy the `loki-stack` helm chart. + + ```console + helm install scalar-logging-loki grafana/loki-stack -n monitoring -f scalar-loki-stack-custom-values.yaml + ``` + +## Step 3. Add a Loki data source in the Grafana configuration + +1. Add a configuration of the Loki data source in the `scalar-prometheus-custom-values.yaml` file. + + ```yaml + grafana: + additionalDataSources: + - name: Loki + type: loki + uid: loki + url: http://scalar-logging-loki:3100/ + access: proxy + editable: false + isDefault: false + ``` + +1. Apply the configuration (upgrade the deployment of `kube-prometheus-stack`). + + ```console + helm upgrade scalar-monitoring prometheus-community/kube-prometheus-stack -n monitoring -f scalar-prometheus-custom-values.yaml + ``` + +## Step 4. Access the Grafana dashboard + +1. Add Loki as a data source + - Go to Grafana http://localhost:3000 (If you use minikube) + - Go to `Explore` to find the added Loki + - You can see the collected logs in the `Explore` page + +## Step 5. Delete the `loki-stack` helm chart + +1. Uninstall `loki-stack`. + + ```console + helm uninstall scalar-logging-loki -n monitoring + ``` diff --git a/docs/3.12/helm-charts/getting-started-monitoring.md b/docs/3.12/helm-charts/getting-started-monitoring.md new file mode 100644 index 00000000..9c4993a4 --- /dev/null +++ b/docs/3.12/helm-charts/getting-started-monitoring.md @@ -0,0 +1,256 @@ +# Getting Started with Helm Charts (Monitoring using Prometheus Operator) + +This document explains how to get started with Scalar products monitoring on Kubernetes using Prometheus Operator (kube-prometheus-stack). Here, we assume that you already have a Mac or Linux environment for testing. We use **Minikube** in this document, but the steps we will show should work in any Kubernetes cluster. + +## What we create + +We will deploy the following components on a Kubernetes cluster as follows. + +``` ++--------------------------------------------------------------------------------------------------+ +| +------------------------------------------------------+ +-----------------+ | +| | kube-prometheus-stack | | Scalar Products | | +| | | | | | +| | +--------------+ +--------------+ +--------------+ | -----(Monitor)----> | +-----------+ | | +| | | Prometheus | | Alertmanager | | Grafana | | | | ScalarDB | | | +| | +-------+------+ +------+-------+ +------+-------+ | | +-----------+ | | +| | | | | | | +-----------+ | | +| | +----------------+-----------------+ | | | ScalarDL | | | +| | | | | +-----------+ | | +| +--------------------------+---------------------------+ +-----------------+ | +| | | +| | Kubernetes | ++----------------------------+---------------------------------------------------------------------+ + | <- expose to localhost (127.0.0.1) or use load balancer etc to access + | + (Access Dashboard through HTTP) + | + +----+----+ + | Browser | + +---------+ +``` + +## Step 1. Start a Kubernetes cluster + +First, you need to prepare a Kubernetes cluster. If you use a **minikube** environment, please refer to the [Getting Started with Scalar Helm Charts](./getting-started-scalar-helm-charts.md). If you have already started a Kubernetes cluster, you can skip this step. + +## Step 2. Prepare a custom values file + +1. 
Save the sample file [scalar-prometheus-custom-values.yaml](./conf/scalar-prometheus-custom-values.yaml) for `kube-prometheus-stack`. + +1. Add custom values in the `scalar-prometheus-custom-values.yaml` as follows. + * settings + * `prometheus.service.type` to `LoadBalancer` + * `alertmanager.service.type` to `LoadBalancer` + * `grafana.service.type` to `LoadBalancer` + * `grafana.service.port` to `3000` + * Example + ```yaml + alertmanager: + + service: + type: LoadBalancer + + ... + + grafana: + + service: + type: LoadBalancer + port: 3000 + + ... + + prometheus: + + service: + type: LoadBalancer + + ... + ``` + * Note: + * If you want to customize the Prometheus Operator deployment by using Helm Charts, you'll need to set the following configurations to monitor Scalar products: + * Set `serviceMonitorSelectorNilUsesHelmValues` and `ruleSelectorNilUsesHelmValues` to `false` (`true` by default) so that Prometheus Operator can detect `ServiceMonitor` and `PrometheusRule` for Scalar products. + + * If you want to use Scalar Manager, you'll need to set the following configurations to enable Scalar Manager to collect CPU and memory resources: + * Set `kubeStateMetrics.enabled`, `nodeExporter.enabled`, and `kubelet.enabled` to `true`. + +## Step 3. Deploy `kube-prometheus-stack` + +1. Add the `prometheus-community` helm repository. + ```console + helm repo add prometheus-community https://prometheus-community.github.io/helm-charts + ``` + +1. Create a namespace `monitoring` on the Kubernetes. + ```console + kubectl create namespace monitoring + ``` + +1. Deploy the `kube-prometheus-stack`. + ```console + helm install scalar-monitoring prometheus-community/kube-prometheus-stack -n monitoring -f scalar-prometheus-custom-values.yaml + ``` + +## Step 4. Deploy (or Upgrade) Scalar products using Helm Charts + +* Note: + * The following explains the minimum steps. If you want to know more details about the deployment of ScalarDB and ScalarDL, please refer to the following documents. + * [Getting Started with Helm Charts (ScalarDB Server)](./getting-started-scalardb.md) + * [Getting Started with Helm Charts (ScalarDL Ledger / Ledger only)](./getting-started-scalardl-ledger.md) + * [Getting Started with Helm Charts (ScalarDL Ledger and Auditor / Auditor mode)](./getting-started-scalardl-auditor.md) + +1. To enable Prometheus monitoring of Scalar products, set `true` to the following configurations in the custom values file. + * Configurations + * `*.prometheusRule.enabled` + * `*.grafanaDashboard.enabled` + * `*.serviceMonitor.enabled` + * Sample configuration files + * ScalarDB (scalardb-custom-values.yaml) + ```yaml + envoy: + prometheusRule: + enabled: true + grafanaDashboard: + enabled: true + serviceMonitor: + enabled: true + + scalardb: + prometheusRule: + enabled: true + grafanaDashboard: + enabled: true + serviceMonitor: + enabled: true + ``` + * ScalarDL Ledger (scalardl-ledger-custom-values.yaml) + ```yaml + envoy: + prometheusRule: + enabled: true + grafanaDashboard: + enabled: true + serviceMonitor: + enabled: true + + ledger: + prometheusRule: + enabled: true + grafanaDashboard: + enabled: true + serviceMonitor: + enabled: true + ``` + * ScalarDL Auditor (scalardl-auditor-custom-values.yaml) + ```yaml + envoy: + prometheusRule: + enabled: true + grafanaDashboard: + enabled: true + serviceMonitor: + enabled: true + + auditor: + prometheusRule: + enabled: true + grafanaDashboard: + enabled: true + serviceMonitor: + enabled: true + ``` + +1. 
Deploy (or Upgrade) Scalar products using Helm Charts with the above custom values file. + * Examples + * ScalarDB + ```console + helm install scalardb scalar-labs/scalardb -f ./scalardb-custom-values.yaml + ``` + ```console + helm upgrade scalardb scalar-labs/scalardb -f ./scalardb-custom-values.yaml + ``` + * ScalarDL Ledger + ```console + helm install scalardl-ledger scalar-labs/scalardl -f ./scalardl-ledger-custom-values.yaml + ``` + ```console + helm upgrade scalardl-ledger scalar-labs/scalardl -f ./scalardl-ledger-custom-values.yaml + ``` + * ScalarDL Auditor + ```console + helm install scalardl-auditor scalar-labs/scalardl-audit -f ./scalardl-auditor-custom-values.yaml + ``` + ```console + helm upgrade scalardl-auditor scalar-labs/scalardl-audit -f ./scalardl-auditor-custom-values.yaml + ``` + +## Step 5. Access Dashboards + +### If you use minikube + +1. To expose each service resource as your `localhost (127.0.0.1)`, open another terminal, and run the `minikube tunnel` command. + ```console + minikube tunnel + ``` + + After running the `minikube tunnel` command, you can see the EXTERNAL-IP of each service resource as `127.0.0.1`. + ```console + kubectl get svc -n monitoring scalar-monitoring-kube-pro-prometheus scalar-monitoring-kube-pro-alertmanager scalar-monitoring-grafana + ``` + [Command execution result] + ```console + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + scalar-monitoring-kube-pro-prometheus LoadBalancer 10.98.11.12 127.0.0.1 9090:30550/TCP 26m + scalar-monitoring-kube-pro-alertmanager LoadBalancer 10.98.151.66 127.0.0.1 9093:31684/TCP 26m + scalar-monitoring-grafana LoadBalancer 10.103.19.4 127.0.0.1 3000:31948/TCP 26m + ``` + +1. Access each Dashboard. + * Prometheus + ```console + http://localhost:9090/ + ``` + * Alertmanager + ```console + http://localhost:9093/ + ``` + * Grafana + ```console + http://localhost:3000/ + ``` + * Note: + * You can see the user and password of Grafana as follows. + * user + ```console + kubectl get secrets scalar-monitoring-grafana -n monitoring -o jsonpath='{.data.admin-user}' | base64 -d + ``` + * password + ```console + kubectl get secrets scalar-monitoring-grafana -n monitoring -o jsonpath='{.data.admin-password}' | base64 -d + ``` + +### If you use other Kubernetes than minikube + +If you use a Kubernetes cluster other than minikube, you need to access the LoadBalancer service according to the manner of each Kubernetes cluster. For example, using a Load Balancer provided by cloud service or the `kubectl port-forward` command. + +## Step 6. Delete all resources + +After completing the Monitoring tests on the Kubernetes cluster, remove all resources. + +1. Terminate the `minikube tunnel` command. (If you use minikube) + ```console + Ctrl + C + ``` + +1. Uninstall `kube-prometheus-stack`. + ```console + helm uninstall scalar-monitoring -n monitoring + ``` + +1. Delete minikube. (Optional / If you use minikube) + ```console + minikube delete --all + ``` + * Note: + * If you deploy the ScalarDB or ScalarDL, you need to remove them before deleting minikube. diff --git a/docs/3.12/helm-charts/getting-started-scalar-helm-charts.md b/docs/3.12/helm-charts/getting-started-scalar-helm-charts.md new file mode 100644 index 00000000..4e1005e0 --- /dev/null +++ b/docs/3.12/helm-charts/getting-started-scalar-helm-charts.md @@ -0,0 +1,62 @@ +# Getting Started with Scalar Helm Charts + +This document explains how to get started with Scalar Helm Chart on a Kubernetes cluster as a test environment. 
Here, we assume that you already have a Mac or Linux environment for testing. We use **Minikube** in this document, but the steps we will show should work in any Kubernetes cluster. + +## Tools + +We will use the following tools for testing. + +1. minikube (If you use other Kubernetes distributions, minikube is not necessary.) +1. kubectl +1. Helm +1. cfssl / cfssljson + +## Step 1. Install tools + +First, you need to install the following tools used in this guide. + +1. Install the minikube according to the [minikube document](https://minikube.sigs.k8s.io/docs/start/) + +1. Install the kubectl according to the [Kubernetes document](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/) + +1. Install the helm command according to the [Helm document](https://helm.sh/docs/intro/install/) + +1. Install the cfssl and cfssljson according to the [CFSSL document](https://github.com/cloudflare/cfssl) + * Note: + * You need the cfssl and cfssljson when you try ScalarDL. If you try Scalar Helm Charts other than ScalarDL (e.g., ScalarDB, Monitoring, Logging, etc...), the cfssl and cfssljson are not necessary. + +## Step 2. Start minikube with docker driver (Optional / If you use minikube) + +1. Start minikube. + ```console + minikube start + ``` + +1. Check the status of the minikube and pods. + ```console + kubectl get pod -A + ``` + [Command execution result] + ```console + NAMESPACE NAME READY STATUS RESTARTS AGE + kube-system coredns-64897985d-lbsfr 1/1 Running 1 (20h ago) 21h + kube-system etcd-minikube 1/1 Running 1 (20h ago) 21h + kube-system kube-apiserver-minikube 1/1 Running 1 (20h ago) 21h + kube-system kube-controller-manager-minikube 1/1 Running 1 (20h ago) 21h + kube-system kube-proxy-gsl6j 1/1 Running 1 (20h ago) 21h + kube-system kube-scheduler-minikube 1/1 Running 1 (20h ago) 21h + kube-system storage-provisioner 1/1 Running 2 (19s ago) 21h + ``` + If the minikube starts properly, you can see some pods are **Running** in the kube-system namespace. + +## Step 3. + +After the Kubernetes cluster starts, you can try each Scalar Helm Charts on it. Please refer to the following documents for more details. + +* [ScalarDB Analytics with PostgreSQL](./getting-started-scalardb-analytics-postgresql.md) +* [ScalarDL Ledger (Ledger only)](./getting-started-scalardl-ledger.md) +* [ScalarDL Ledger and Auditor (Auditor mode)](./getting-started-scalardl-auditor.md) +* [Monitoring using Prometheus Operator](./getting-started-monitoring.md) + * [Logging using Loki Stack](./getting-started-logging.md) + * [Scalar Manager](./getting-started-scalar-manager.md) +* [[Deprecated] ScalarDB Server](./getting-started-scalardb.md) diff --git a/docs/3.12/helm-charts/getting-started-scalar-manager.md b/docs/3.12/helm-charts/getting-started-scalar-manager.md new file mode 100644 index 00000000..1864bab3 --- /dev/null +++ b/docs/3.12/helm-charts/getting-started-scalar-manager.md @@ -0,0 +1,154 @@ +# Getting Started with Helm Charts (Scalar Manager) +Scalar Manager is a web-based dashboard that allows users to: +* check the health of the Scalar products +* pause and unpause the Scalar products to backup or restore underlying databases +* check the metrics and logs of the Scalar products through Grafana dashboards + +The users can pause or unpause Scalar products through Scalar Manager to backup or restore the underlying databases. +Scalar Manager also embeds Grafana explorers by which the users can review the metrics or logs of the Scalar products. 
+ +## Assumption +This guide assumes that the users are aware of how to deploy Scalar products with the monitoring and logging tools to a Kubernetes cluster. +If not, please start with [Getting Started with Scalar Helm Charts](./getting-started-scalar-helm-charts.md) before this guide. + +## Requirement + +* You need privileges to pull the Scalar Manager container (`scalar-manager`) from [GitHub Packages](https://github.com/orgs/scalar-labs/packages). +* You must create a Github Personal Access Token (PAT) with `read:packages` scope according to the [GitHub document](https://docs.github.com/en/github/authenticating-to-github/keeping-your-account-and-data-secure/creating-a-personal-access-token) to pull the above container. + +## What we create + +We will deploy the following components on a Kubernetes cluster as follows. + +``` ++--------------------------------------------------------------------------------------------------+ +| +----------------------+ | +| | scalar-manager | | +| | | | +| | +------------------+ | --------------------------(Manage)--------------------------+ | +| | | Scalar Manager | | | | +| | +------------------+ | | | +| +--+-------------------+ | | +| | | | +| +------------------------------------+ | | +| | loki-stack | V | +| | | +-----------------+ | +| | +--------------+ +--------------+ | <----------------(Log)--------------- | Scalar Products | | +| | | Loki | | Promtail | | | | | +| | +--------------+ +--------------+ | | +-----------+ | | +| +------------------------------------+ | | ScalarDB | | | +| | | +-----------+ | | +| +------------------------------------------------------+ | | | +| | kube-prometheus-stack | | +-----------+ | | +| | | | | ScalarDL | | | +| | +--------------+ +--------------+ +--------------+ | -----(Monitor)----> | +-----------+ | | +| | | Prometheus | | Alertmanager | | Grafana | | +-----------------+ | +| | +-------+------+ +------+-------+ +------+-------+ | | +| | | | | | | +| | +----------------+-----------------+ | | +| | | | | +| +--------------------------+---------------------------+ | +| | | | +| | | Kubernetes | ++----+-----------------------+---------------------------------------------------------------------+ + | | + expose to localhost (127.0.0.1) or use load balancer etc to access + | | + (Access Dashboard through HTTP) + | | ++----+----+ +----+----+ +| Browser | <-(Embed)-- + Browser | ++---------+ +---------+ +``` + +## Step 1. Upgrade the `kube-prometheus-stack` to allow Grafana to be embedded + +1. Add or revise this value to the custom values file (e.g. scalar-prometheus-custom-values.yaml) of the `kube-prometheus-stack` + + ```yaml + grafana: + grafana.ini: + security: + allow_embedding: true + cookie_samesite: disabled + ``` + +1. Upgrade the Helm installation + + ```console + helm upgrade scalar-monitoring prometheus-community/kube-prometheus-stack -n monitoring -f scalar-prometheus-custom-values.yaml + ``` + +## Step 2. Prepare a custom values file for Scalar Manager + +1. Get the sample file [scalar-manager-custom-values.yaml](./conf/scalar-manager-custom-values.yaml) for `scalar-manager`. + +1. Add the targets that you would like to manage. For example, if we want to manage a ledger cluster, then we can add the values as follows. + + ```yaml + scalarManager: + targets: + - name: my-ledgers-cluster + adminSrv: _scalardl-admin._tcp.scalardl-headless.default.svc.cluster.local + databaseType: cassandra + ``` + + Note: the `adminSrv` is the DNS Service URL that returns SRV record of pods. 
Kubernetes creates this URL for the named port of the headless service of the Scalar product. The format is `_{port name}._{protocol}.{service name}.{namespace}.svc.{cluster domain name}` + +1. Set the Grafana URL. For example, if your Grafana of the `kube-prometheus-stack` is exposed in `localhost:3000`, then we can set it as follows. + + ```yaml + scalarManager: + grafanaUrl: "http://localhost:3000" + ``` + +1. Set the refresh interval that Scalar Manager checks the status of the products. The default value is `30` seconds, but we can change it like: + + ```yaml + scalarManager: + refreshInterval: 60 # one minute + ``` + +1. Set the service type to access Scalar Manager. The default value is `ClusterIP`, but if we access using the `minikube tunnel` command or some load balancer, we can set it as `LoadBalancer`. + + ```yaml + service: + type: LoadBalancer + ``` + +## Step 3. Deploy `scalar-manager` + +1. Create a secret resource `reg-docker-secrets` to pull the Scalar Manager container image from GitHub Packages. + + ```console + kubectl create secret docker-registry reg-docker-secrets --docker-server=ghcr.io --docker-username= --docker-password= + ``` + +1. Deploy the `scalar-manager` Helm Chart. + + ```console + helm install scalar-manager scalar-labs/scalar-manager -f scalar-manager-custom-values.yaml + ``` + +## Step 4. Access Scalar Manager + +### If you use minikube + +1. To expose Scalar Manager's service resource as your `localhost (127.0.0.1)`, open another terminal, and run the `minikube tunnel` command. + + ```console + minikube tunnel + ``` + +1. Open the browser with URL `http://localhost:8000` + +### If you use other Kubernetes than minikube + +If you use a Kubernetes cluster other than minikube, you need to access the LoadBalancer service according to the manner of each Kubernetes cluster. For example, using a Load Balancer provided by cloud service or the `kubectl port-forward` command. + +## Step 5. Delete Scalar Manager +1. Uninstall `scalar-manager` + + ```console + helm uninstall scalar-manager + ``` diff --git a/docs/3.12/helm-charts/getting-started-scalardb-analytics-postgresql.md b/docs/3.12/helm-charts/getting-started-scalardb-analytics-postgresql.md new file mode 100644 index 00000000..1e2fe8fa --- /dev/null +++ b/docs/3.12/helm-charts/getting-started-scalardb-analytics-postgresql.md @@ -0,0 +1,510 @@ +# Getting Started with Helm Charts (ScalarDB Analytics with PostgreSQL) + +This guide explains how to get started with ScalarDB Analytics with PostgreSQL by using a Helm Chart in a Kubernetes cluster as a test environment. In addition, the contents of this guide assume that you already have a Mac or Linux environment set up for testing. Although **minikube** is mentioned, the steps described should work in any Kubernetes cluster. 
+ +## What you will create + +You will deploy the following components in a Kubernetes cluster: + +``` ++-------------------------------------------------------------------------------------------------------------------------------------------+ +| [Kubernetes cluster] | +| | +| [Pod] [Pod] [Pod] | +| | +| +------------------------------------+ | +| +---> | ScalarDB Analytics with PostgreSQL | ---+ +-----------------------------+ | +| | +------------------------------------+ | +---> | MySQL ("customer" schema) | <---+ | +| | | | +-----------------------------+ | | +| +-------------+ +---------+ | +------------------------------------+ | | | | +| | OLAP client | ---> | Service | ---+---> | ScalarDB Analytics with PostgreSQL | ---+---+ +---+ | +| +-------------+ +---------+ | +------------------------------------+ | | | | | +| | | | +-----------------------------+ | | | +| | +------------------------------------+ | +---> | PostgreSQL ("order" schema) | <---+ | | +| +---> | ScalarDB Analytics with PostgreSQL | ---+ +-----------------------------+ | | +| +------------------------------------+ | | +| | | +| +-------------+ | | +| | OLTP client | ---(Load sample data with a test OLTP workload)-----------------------------------------------------------------------+ | +| +-------------+ | +| | ++-------------------------------------------------------------------------------------------------------------------------------------------+ +``` + +## Step 1. Start a Kubernetes cluster + +First, you need to prepare a Kubernetes cluster. If you're using a **minikube** environment, please refer to the [Getting Started with Scalar Helm Charts](./getting-started-scalar-helm-charts.md). If you have already started a Kubernetes cluster, you can skip this step. + +## Step 2. Start MySQL and PostgreSQL pods + +ScalarDB including ScalarDB Analytics with PostgreSQL can use several types of database systems as a backend database. In this guide, you will use MySQL and PostgreSQL. + +You can deploy MySQL and PostgreSQL on the Kubernetes cluster as follows: + +1. Add the Bitnami helm repository. + + ```console + helm repo add bitnami https://charts.bitnami.com/bitnami + ``` + +1. Update the helm repository. + + ```console + helm repo update bitnami + ``` + +1. Deploy MySQL. + + ```console + helm install mysql-scalardb bitnami/mysql \ + --set auth.rootPassword=mysql \ + --set primary.persistence.enabled=false + ``` + +1. Deploy PostgreSQL. + + ```console + helm install postgresql-scalardb bitnami/postgresql \ + --set auth.postgresPassword=postgres \ + --set primary.persistence.enabled=false + ``` + +1. Check if the MySQL and PostgreSQL pods are running. + + ```console + kubectl get pod + ``` + + You should see the following output: + + ```console + $ kubectl get pod + NAME READY STATUS RESTARTS AGE + mysql-scalardb-0 1/1 Running 0 3m17s + postgresql-scalardb-0 1/1 Running 0 3m12s + ``` + +## Step 3. Create a working directory + +Since you'll be creating some configuration files locally, create a working directory for those files. + + ```console + mkdir -p ~/scalardb-analytics-postgresql-test/ + ``` + +## Step 4. Set the versions of ScalarDB, ScalarDB Analytics with PostgreSQL, and the chart + +Set the following three environment variables. If you want to use another version of ScalarDB and ScalarDB Analytics with PostgreSQL, be sure to set them to the versions that you want to use. 
+ +{% capture notice--info %} +**Note** + +You must use the same minor versions (for example, 3.10.x) of ScalarDB Analytics with PostgreSQL as ScalarDB, but you don't need to make the patch versions match. For example, you can use ScalarDB 3.10.1 and ScalarDB Analytics with PostgreSQL 3.10.3 together. +{% endcapture %} + +
{{ notice--info | markdownify }}
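+
+As an optional sanity check (run it after setting the three variables in the commands that follow), you can confirm that the two products share the same minor version. This snippet only automates the note above and is not part of the official procedure; it assumes a POSIX-compatible shell.
+
+```console
+# Optional: compare only the major.minor parts (e.g., 3.10), ignoring the patch versions.
+if [ "$(echo "${SCALARDB_VERSION}" | cut -d. -f1-2)" = "$(echo "${SCALARDB_ANALYTICS_WITH_POSTGRESQL_VERSION}" | cut -d. -f1-2)" ]; then
+  echo "Minor versions match"
+else
+  echo "Minor versions do NOT match"
+fi
+```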
+ +```console +SCALARDB_VERSION=3.10.1 +``` + +```console +SCALARDB_ANALYTICS_WITH_POSTGRESQL_VERSION=3.10.3 +``` + +```console +CHART_VERSION=$(helm search repo scalar-labs/scalardb-analytics-postgresql -l | grep -e ${SCALARDB_ANALYTICS_WITH_POSTGRESQL_VERSION} | awk '{print $2}' | sort --version-sort -r | head -n 1) +``` + +## Step 5. Run OLTP transactions to load sample data to MySQL and PostgreSQL + +Before deploying ScalarDB Analytics with PostgreSQL, run the OLTP transactions to create sample data. + +1. Start an OLTP client pod in the Kubernetes cluster. + + ```console + kubectl run oltp-client --image eclipse-temurin:8-jdk-jammy --env SCALARDB_VERSION=${SCALARDB_VERSION} -- sleep inf + ``` + +1. Check if the OLTP client pod is running. + + ```console + kubectl get pod oltp-client + ``` + + You should see the following output: + + ```console + $ kubectl get pod oltp-client + NAME READY STATUS RESTARTS AGE + oltp-client 1/1 Running 0 17s + ``` + +1. Run bash in the OLTP client pod. + + ```console + kubectl exec -it oltp-client -- bash + ``` + + After this step, run each command in the OLTP client pod. + +1. Install the git and curl commands in the OLTP client pod. + + ```console + apt update && apt install -y curl git + ``` + +1. Clone the ScalarDB samples repository. + + ```console + git clone https://github.com/scalar-labs/scalardb-samples.git + ``` + +1. Go to the directory `scalardb-samples/multi-storage-transaction-sample/`. + + ```console + cd scalardb-samples/multi-storage-transaction-sample/ + ``` + + ```console + pwd + ``` + + You should see the following output: + + ```console + # pwd + /scalardb-samples/multi-storage-transaction-sample + ``` + +1. Create a configuration file (`database.properties`) to access MySQL and PostgreSQL in the Kubernetes cluster. + + ```console + cat << 'EOF' > database.properties + scalar.db.storage=multi-storage + scalar.db.multi_storage.storages=storage0,storage1 + + # Storage 0 + scalar.db.multi_storage.storages.storage0.storage=jdbc + scalar.db.multi_storage.storages.storage0.contact_points=jdbc:mysql://mysql-scalardb.default.svc.cluster.local:3306/ + scalar.db.multi_storage.storages.storage0.username=root + scalar.db.multi_storage.storages.storage0.password=mysql + + # Storage 1 + scalar.db.multi_storage.storages.storage1.storage=jdbc + scalar.db.multi_storage.storages.storage1.contact_points=jdbc:postgresql://postgresql-scalardb.default.svc.cluster.local:5432/postgres + scalar.db.multi_storage.storages.storage1.username=postgres + scalar.db.multi_storage.storages.storage1.password=postgres + + scalar.db.multi_storage.namespace_mapping=customer:storage0,order:storage1 + scalar.db.multi_storage.default_storage=storage1 + EOF + ``` + +1. Download Schema Loader from [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases). + + ```console + curl -OL https://github.com/scalar-labs/scalardb/releases/download/v${SCALARDB_VERSION}/scalardb-schema-loader-${SCALARDB_VERSION}.jar + ``` + +1. Run Schema Loader to create sample tables. + + ```console + java -jar scalardb-schema-loader-${SCALARDB_VERSION}.jar --config database.properties --schema-file schema.json --coordinator + ``` + +1. Load initial data for the sample workload. + + ```console + ./gradlew run --args="LoadInitialData" + ``` + +1. Run the sample workload of OLTP transactions. Running these commands will create several `order` entries as sample data. 
+ + ```console + ./gradlew run --args="PlaceOrder 1 1:3,2:2" + ``` + + ```console + ./gradlew run --args="PlaceOrder 1 5:1" + ``` + + ```console + ./gradlew run --args="PlaceOrder 2 3:1,4:1" + ``` + + ```console + ./gradlew run --args="PlaceOrder 2 2:1" + ``` + + ```console + ./gradlew run --args="PlaceOrder 3 1:1" + ``` + + ```console + ./gradlew run --args="PlaceOrder 3 2:1" + ``` + + ```console + ./gradlew run --args="PlaceOrder 3 3:1" + ``` + + ```console + ./gradlew run --args="PlaceOrder 3 5:1" + ``` + + +1. Exit from OLTP client. + + ```console + exit + ``` + +## Step 6. Deploy ScalarDB Analytics with PostgreSQL + +After creating sample data via ScalarDB in the backend databases, deploy ScalarDB Analytics with PostgreSQL. + +1. Create a custom values file for ScalarDB Analytics with PostgreSQL (`scalardb-analytics-postgresql-custom-values.yaml`). + + ```console + cat << 'EOF' > ~/scalardb-analytics-postgresql-test/scalardb-analytics-postgresql-custom-values.yaml + scalardbAnalyticsPostgreSQL: + databaseProperties: | + scalar.db.storage=multi-storage + scalar.db.multi_storage.storages=storage0,storage1 + + # Storage 0 + scalar.db.multi_storage.storages.storage0.storage=jdbc + scalar.db.multi_storage.storages.storage0.contact_points=jdbc:mysql://mysql-scalardb.default.svc.cluster.local:3306/ + scalar.db.multi_storage.storages.storage0.username=root + scalar.db.multi_storage.storages.storage0.password=mysql + + # Storage 1 + scalar.db.multi_storage.storages.storage1.storage=jdbc + scalar.db.multi_storage.storages.storage1.contact_points=jdbc:postgresql://postgresql-scalardb.default.svc.cluster.local:5432/postgres + scalar.db.multi_storage.storages.storage1.username=postgres + scalar.db.multi_storage.storages.storage1.password=postgres + + scalar.db.multi_storage.namespace_mapping=customer:storage0,order:storage1 + scalar.db.multi_storage.default_storage=storage1 + schemaImporter: + namespaces: + - customer + - order + EOF + ``` + +1. Create a secret resource to set a superuser password for PostgreSQL. + + ```console + kubectl create secret generic scalardb-analytics-postgresql-superuser-password --from-literal=superuser-password=scalardb-analytics + ``` + +1. Deploy ScalarDB Analytics with PostgreSQL. + + ```console + helm install scalardb-analytics-postgresql scalar-labs/scalardb-analytics-postgresql -n default -f ~/scalardb-analytics-postgresql-test/scalardb-analytics-postgresql-custom-values.yaml --version ${CHART_VERSION} + ``` + +## Step 7. Run an OLAP client pod + +To run some queries via ScalarDB Analytics with PostgreSQL, run an OLAP client pod. + +1. Start an OLAP client pod in the Kubernetes cluster. + + ```console + kubectl run olap-client --image postgres:latest -- sleep inf + ``` + +1. Check if the OLAP client pod is running. + + ```console + kubectl get pod olap-client + ``` + + You should see the following output: + + ```console + $ kubectl get pod olap-client + NAME READY STATUS RESTARTS AGE + olap-client 1/1 Running 0 10s + ``` + +## Step 8. Run sample queries via ScalarDB Analytics with PostgreSQL + +After running the OLAP client pod, you can run some queries via ScalarDB Analytics with PostgreSQL. + +1. Run bash in the OLAP client pod. + + ```console + kubectl exec -it olap-client -- bash + ``` + + After this step, run each command in the OLAP client pod. + +1. Run the psql command to access ScalarDB Analytics with PostgreSQL. + + ```console + psql -h scalardb-analytics-postgresql -p 5432 -U postgres -d scalardb + ``` + + The password is `scalardb-analytics`. + +1. 
Read sample data in the `customer.customers` table. + + ```sql + SELECT * FROM customer.customers; + ``` + + You should see the following output: + + ```sql + customer_id | name | credit_limit | credit_total + -------------+---------------+--------------+-------------- + 1 | Yamada Taro | 10000 | 10000 + 2 | Yamada Hanako | 10000 | 9500 + 3 | Suzuki Ichiro | 10000 | 8500 + (3 rows) + ``` + +1. Read sample data in the `order.orders` table. + + ```sql + SELECT * FROM "order".orders; + ``` + + You should see the following output: + + ```sql + scalardb=# SELECT * FROM "order".orders; + customer_id | timestamp | order_id + -------------+---------------+-------------------------------------- + 1 | 1700124015601 | 5ae2a41b-990d-4a16-9700-39355e29adf8 + 1 | 1700124021273 | f3f23d93-3862-48be-8a57-8368b7c8689e + 2 | 1700124028182 | 696a895a-8998-4c3b-b112-4d5763bfcfd8 + 2 | 1700124036158 | 9215d63a-a9a2-4471-a990-45897f091ca5 + 3 | 1700124043744 | 9be70cd4-4f93-4753-9d89-68e250b2ac51 + 3 | 1700124051162 | 4e8ce2d2-488c-40d6-aa52-d9ecabfc68a8 + 3 | 1700124058096 | 658b6682-2819-41f2-91ee-2802a1f02857 + 3 | 1700124071240 | 4e2f94f4-53ec-4570-af98-7c648d8ed80f + (8 rows) + ``` + +1. Read sample data in the `order.statements` table. + + ```sql + SELECT * FROM "order".statements; + ``` + + You should see the following output: + + ```sql + scalardb=# SELECT * FROM "order".statements; + order_id | item_id | count + --------------------------------------+---------+------- + 5ae2a41b-990d-4a16-9700-39355e29adf8 | 2 | 2 + 5ae2a41b-990d-4a16-9700-39355e29adf8 | 1 | 3 + f3f23d93-3862-48be-8a57-8368b7c8689e | 5 | 1 + 696a895a-8998-4c3b-b112-4d5763bfcfd8 | 4 | 1 + 696a895a-8998-4c3b-b112-4d5763bfcfd8 | 3 | 1 + 9215d63a-a9a2-4471-a990-45897f091ca5 | 2 | 1 + 9be70cd4-4f93-4753-9d89-68e250b2ac51 | 1 | 1 + 4e8ce2d2-488c-40d6-aa52-d9ecabfc68a8 | 2 | 1 + 658b6682-2819-41f2-91ee-2802a1f02857 | 3 | 1 + 4e2f94f4-53ec-4570-af98-7c648d8ed80f | 5 | 1 + (10 rows) + ``` + +1. Read sample data in the `order.items` table. + + ```sql + SELECT * FROM "order".items; + ``` + + You should see the following output: + + ```sql + scalardb=# SELECT * FROM "order".items; + item_id | name | price + ---------+--------+------- + 5 | Melon | 3000 + 2 | Orange | 2000 + 4 | Mango | 5000 + 1 | Apple | 1000 + 3 | Grape | 2500 + (5 rows) + ``` + +1. Run the `JOIN` query. For example, you can see the credit remaining information of each user as follows. 
+ + ```sql + SELECT * FROM ( + SELECT c.name, c.credit_limit - c.credit_total AS remaining, array_agg(i.name) OVER (PARTITION BY c.name) AS items + FROM "order".orders o + JOIN customer.customers c ON o.customer_id = c.customer_id + JOIN "order".statements s ON o.order_id = s.order_id + JOIN "order".items i ON s.item_id = i.item_id + ) AS remaining_info GROUP BY name, remaining, items; + ``` + + You should see the following output: + + ```sql + scalardb=# SELECT * FROM ( + scalardb(# SELECT c.name, c.credit_limit - c.credit_total AS remaining, array_agg(i.name) OVER (PARTITION BY c.name) AS items + scalardb(# FROM "order".orders o + scalardb(# JOIN customer.customers c ON o.customer_id = c.customer_id + scalardb(# JOIN "order".statements s ON o.order_id = s.order_id + scalardb(# JOIN "order".items i ON s.item_id = i.item_id + scalardb(# ) AS remaining_info GROUP BY name, remaining, items; + name | remaining | items + ---------------+-----------+---------------------------- + Suzuki Ichiro | 1500 | {Grape,Orange,Apple,Melon} + Yamada Hanako | 500 | {Orange,Grape,Mango} + Yamada Taro | 0 | {Orange,Melon,Apple} + (3 rows) + ``` + +1. Exit from the psql command. + + ```console + \q + ``` + +1. Exit from the OLAP client pod. + + ```console + exit + ``` + +## Step 9. Delete all resources + +After completing the ScalarDB Analytics with PostgreSQL tests on the Kubernetes cluster, remove all resources. + +1. Uninstall MySQL, PostgreSQL, and ScalarDB Analytics with PostgreSQL. + + ```console + helm uninstall mysql-scalardb postgresql-scalardb scalardb-analytics-postgresql + ``` + +1. Remove the client pods. + + ```console + kubectl delete pod oltp-client olap-client --grace-period 0 + ``` + +1. Remove the secret resource. + + ```console + kubectl delete secrets scalardb-analytics-postgresql-superuser-password + ``` + +1. Remove the working directory and sample files. + + ```console + cd ~ + ``` + + ```console + rm -rf ~/scalardb-analytics-postgresql-test/ + ``` diff --git a/docs/3.12/helm-charts/getting-started-scalardb.md b/docs/3.12/helm-charts/getting-started-scalardb.md new file mode 100644 index 00000000..773ca553 --- /dev/null +++ b/docs/3.12/helm-charts/getting-started-scalardb.md @@ -0,0 +1,384 @@ +# [Deprecated] Getting Started with Helm Charts (ScalarDB Server) + +{% capture notice--info %} +**Note** + +ScalarDB Server is now deprecated. Please use [ScalarDB Cluster](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/setup-scalardb-cluster-on-kubernetes-by-using-helm-chart.md) instead. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +This document explains how to get started with ScalarDB Server using Helm Chart on a Kubernetes cluster as a test environment. Here, we assume that you already have a Mac or Linux environment for testing. We use **Minikube** in this document, but the steps we will show should work in any Kubernetes cluster. + +## Requirement + +* You need to subscribe to ScalarDB in the [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-rzbuhxgvqf4d2) or [Azure Marketplace](https://azuremarketplace.microsoft.com/en/marketplace/apps/scalarinc.scalardb) to get container images (`scalardb-server` and `scalardb-envoy`). Please refer to the following documents for more details. + * [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md) + * [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md) + +## What we create + +We will deploy the following components on a Kubernetes cluster as follows. + +``` ++--------------------------------------------------------------------------------------------------------------------------------------+ +| [Kubernetes Cluster] | +| | +| [Pod] [Pod] [Pod] [Pod] | +| | +| +-------+ +-----------------+ | +| +---> | Envoy | ---+ +---> | ScalarDB Server | ---+ | +| | +-------+ | | +-----------------+ | | +| | | | | | +| +--------+ +---------+ | +-------+ | +-------------------+ | +-----------------+ | +------------+ | +| | Client | ---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | ScalarDB Server | ---+---> | PostgreSQL | | +| +--------+ | (Envoy) | | +-------+ | | (ScalarDB Server) | | +-----------------+ | +------------+ | +| +---------+ | | +-------------------+ | | | +| | +-------+ | | +-----------------+ | | +| +---> | Envoy | ---+ +---> | ScalarDB Server | ---+ | +| +-------+ +-----------------+ | +| | ++--------------------------------------------------------------------------------------------------------------------------------------+ +``` + +## Step 1. Start a Kubernetes cluster + +First, you need to prepare a Kubernetes cluster. If you use a **minikube** environment, please refer to the [Getting Started with Scalar Helm Charts](./getting-started-scalar-helm-charts.md). If you have already started a Kubernetes cluster, you can skip this step. + +## Step 2. Start a PostgreSQL container + +ScalarDB uses some kind of database system as a backend database. In this document, we use PostgreSQL. + +You can deploy PostgreSQL on the Kubernetes cluster as follows. + +1. Add the Bitnami helm repository. + ```console + helm repo add bitnami https://charts.bitnami.com/bitnami + ``` + +1. Deploy PostgreSQL. + ```console + helm install postgresql-scalardb bitnami/postgresql \ + --set auth.postgresPassword=postgres \ + --set primary.persistence.enabled=false + ``` + +1. Check if the PostgreSQL container is running. + ```console + kubectl get pod + ``` + [Command execution result] + ```console + NAME READY STATUS RESTARTS AGE + postgresql-scalardb-0 1/1 Running 0 2m42s + ``` + +## Step 3. Deploy ScalarDB Server on the Kubernetes cluster using Helm Charts + +1. Add the Scalar helm repository. + ```console + helm repo add scalar-labs https://scalar-labs.github.io/helm-charts + ``` + +1. Create a secret resource to pull the ScalarDB container images from AWS/Azure Marketplace. 
+ * AWS Marketplace + ```console + kubectl create secret docker-registry reg-ecr-mp-secrets \ + --docker-server=709825985650.dkr.ecr.us-east-1.amazonaws.com \ + --docker-username=AWS \ + --docker-password=$(aws ecr get-login-password --region us-east-1) + ``` + * Azure Marketplace + ```console + kubectl create secret docker-registry reg-acr-secrets \ + --docker-server= \ + --docker-username= \ + --docker-password= + ``` + + Please refer to the following documents for more details. + + * [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md) + * [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md) + +1. Create a custom values file for ScalarDB Server (scalardb-custom-values.yaml). + * AWS Marketplace + + {% raw %} + ```console + cat << 'EOF' > scalardb-custom-values.yaml + envoy: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardb-envoy" + version: "1.3.0" + imagePullSecrets: + - name: "reg-ecr-mp-secrets" + + scalardb: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardb-server" + tag: "3.7.0" + imagePullSecrets: + - name: "reg-ecr-mp-secrets" + databaseProperties: | + scalar.db.storage=jdbc + scalar.db.contact_points=jdbc:postgresql://postgresql-scalardb.default.svc.cluster.local:5432/postgres + scalar.db.username={{ default .Env.SCALAR_DB_POSTGRES_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DB_POSTGRES_PASSWORD "" }} + secretName: "scalardb-credentials-secret" + EOF + ``` + {% endraw %} + + * Azure Marketplace + + {% raw %} + ```console + cat << 'EOF' > scalardb-custom-values.yaml + envoy: + image: + repository: "/scalarinc/scalardb-envoy" + version: "1.3.0" + imagePullSecrets: + - name: "reg-acr-secrets" + + scalardb: + image: + repository: "/scalarinc/scalardb-server" + tag: "3.7.0" + imagePullSecrets: + - name: "reg-acr-secrets" + databaseProperties: | + scalar.db.storage=jdbc + scalar.db.contact_points=jdbc:postgresql://postgresql-scalardb.default.svc.cluster.local:5432/postgres + scalar.db.username={{ default .Env.SCALAR_DB_POSTGRES_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DB_POSTGRES_PASSWORD "" }} + secretName: "scalardb-credentials-secret" + EOF + ``` + {% endraw %} + +1. Create a Secret resource that includes a username and password for PostgreSQL. + ```console + kubectl create secret generic scalardb-credentials-secret \ + --from-literal=SCALAR_DB_POSTGRES_USERNAME=postgres \ + --from-literal=SCALAR_DB_POSTGRES_PASSWORD=postgres + ``` + +1. Deploy ScalarDB Server. + ```console + helm install scalardb scalar-labs/scalardb -f ./scalardb-custom-values.yaml + ``` + +1. Check if the ScalarDB Server pods are deployed. + ```console + kubectl get pod + ``` + [Command execution result] + ```console + NAME READY STATUS RESTARTS AGE + postgresql-scalardb-0 1/1 Running 0 9m48s + scalardb-765598848b-75csp 1/1 Running 0 6s + scalardb-765598848b-w864f 1/1 Running 0 6s + scalardb-765598848b-x8rqj 1/1 Running 0 6s + scalardb-envoy-84c475f77b-kpz2p 1/1 Running 0 6s + scalardb-envoy-84c475f77b-n74tk 1/1 Running 0 6s + scalardb-envoy-84c475f77b-zbrwz 1/1 Running 0 6s + ``` + If the ScalarDB Server Pods are deployed properly, you can see the STATUS are **Running**. + +1. Check if the ScalarDB Server services are deployed. 
+ ```console + kubectl get svc + ``` + [Command execution result] + ```console + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + kubernetes ClusterIP 10.96.0.1 443/TCP 47d + postgresql-scalardb ClusterIP 10.109.118.122 5432/TCP 10m + postgresql-scalardb-hl ClusterIP None 5432/TCP 10m + scalardb-envoy ClusterIP 10.110.110.250 60051/TCP 41s + scalardb-envoy-metrics ClusterIP 10.107.98.227 9001/TCP 41s + scalardb-headless ClusterIP None 60051/TCP 41s + scalardb-metrics ClusterIP 10.108.188.10 8080/TCP 41s + ``` + If the ScalarDB Server services are deployed properly, you can see private IP addresses in the CLUSTER-IP column. (Note: `scalardb-headless` has no CLUSTER-IP.) + +## Step 4. Start a Client container + +1. Start a Client container on the Kubernetes cluster. + ```console + kubectl run scalardb-client --image eclipse-temurin:8 --command sleep inf + ``` + +1. Check if the Client container is running. + ```console + kubectl get pod scalardb-client + ``` + [Command execution result] + ```console + NAME READY STATUS RESTARTS AGE + scalardb-client 1/1 Running 0 23s + ``` + +## Step 5. Run ScalarDB sample applications in the Client container + +The following explains the minimum steps. If you want to know more details about ScalarDB, please refer to the [Getting Started with ScalarDB](https://github.com/scalar-labs/scalardb/blob/master/docs/getting-started-with-scalardb.md). + +1. Run bash in the Client container. + ```console + kubectl exec -it scalardb-client -- bash + ``` + After this step, run each command in the Client container. + +1. Install the git and curl commands in the Client container. + ```console + apt update && apt install -y git curl + ``` + +1. Clone ScalarDB git repository. + ```console + git clone https://github.com/scalar-labs/scalardb.git + ``` + +1. Change the directory to `scalardb/`. + ```console + cd scalardb/ + ``` + ```console + pwd + ``` + [Command execution result] + ```console + /scalardb + ``` + +1. Change branch to arbitrary version. + ```console + git checkout -b v3.7.0 refs/tags/v3.7.0 + ``` + ```console + git branch + ``` + [Command execution result] + + {% raw %} + ```console + master + * v3.7.0 + ``` + {% endraw %} + + If you want to use another version, please specify the version (tag) you want to use. + +1. Change the directory to `docs/getting-started/`. + ```console + cd docs/getting-started/ + ``` + ```console + pwd + ``` + [Command execution result] + ```console + /scalardb/docs/getting-started + ``` + +1. Download Schema Loader from [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases). + ```console + curl -OL https://github.com/scalar-labs/scalardb/releases/download/v3.7.0/scalardb-schema-loader-3.7.0.jar + ``` + You need to use the same version of ScalarDB and Schema Loader. + +1. Create a configuration file (scalardb.properties) to access ScalarDB Server on the Kubernetes cluster. + ```console + cat << 'EOF' > scalardb.properties + scalar.db.contact_points=scalardb-envoy.default.svc.cluster.local + scalar.db.contact_port=60051 + scalar.db.storage=grpc + scalar.db.transaction_manager=grpc + EOF + ``` + +1. Create a JSON file (emoney-transaction.json) that defines DB Schema for the sample applications. + ```console + cat << 'EOF' > emoney-transaction.json + { + "emoney.account": { + "transaction": true, + "partition-key": [ + "id" + ], + "clustering-key": [], + "columns": { + "id": "TEXT", + "balance": "INT" + } + } + } + EOF + ``` + +1. Run Schema Loader (Create sample TABLE). 
+ ```console + java -jar ./scalardb-schema-loader-3.7.0.jar --config ./scalardb.properties -f emoney-transaction.json --coordinator + ``` + +1. Run the sample applications. + * Charge `1000` to `user1`: + ```console + ./gradlew run --args="-action charge -amount 1000 -to user1" + ``` + * Charge `0` to `merchant1` (Just create an account for `merchant1`): + ```console + ./gradlew run --args="-action charge -amount 0 -to merchant1" + ``` + * Pay `100` from `user1` to `merchant1`: + ```console + ./gradlew run --args="-action pay -amount 100 -from user1 -to merchant1" + ``` + * Get the balance of `user1`: + ```console + ./gradlew run --args="-action getBalance -id user1" + ``` + * Get the balance of `merchant1`: + ```console + ./gradlew run --args="-action getBalance -id merchant1" + ``` + +1. (Optional) You can see the inserted and modified (INSERT/UPDATE) data through the sample applications using the following command. (This command needs to run on your localhost, not on the Client container.) + ```console + kubectl exec -it postgresql-scalardb-0 -- bash -c 'export PGPASSWORD=postgres && psql -U postgres -d postgres -c "SELECT * FROM emoney.account"' + ``` + [Command execution result] + ```sql + id | balance | tx_id | tx_state | tx_version | tx_prepared_at | tx_committed_at | before_tx_id | before_tx_state | before_tx_version | before_tx_prepared_at | before_tx_committed_at | before_balance + -----------+---------+--------------------------------------+----------+------------+----------------+-----------------+--------------------------------------+-----------------+-------------------+-----------------------+------------------------+---------------- + merchant1 | 100 | 65a90225-0846-4e97-b729-151f76f6ca2f | 3 | 2 | 1667361909634 |1667361909679 | 3633df99-a8ed-4301-a8b9-db1344807d7b | 3 | 1 | 1667361902466 | 1667361902485 | 0 + user1 | 900 | 65a90225-0846-4e97-b729-151f76f6ca2f | 3 | 2 | 1667361909634 |1667361909679 | 5520cba4-625a-4886-b81f-6089bf846d18 | 3 | 1 | 1667361897283 | 1667361897317 | 1000 + (2 rows) + ``` + * Note: + * Usually, you need to access data (records) through ScalarDB. The above command is used to explain and confirm the working of the sample applications. + +## Step 6. Delete all resources + +After completing the ScalarDB Server tests on the Kubernetes cluster, remove all resources. + +1. Uninstall ScalarDB Server and PostgreSQL. + ```console + helm uninstall scalardb postgresql-scalardb + ``` + +1. Remove the Client container. + + ```console + kubectl delete pod scalardb-client --force --grace-period 0 + ``` + +## Further reading + +You can see how to get started with monitoring or logging for Scalar products in the following documents. + +* [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](./getting-started-monitoring.md) +* [Getting Started with Helm Charts (Logging using Loki Stack)](./getting-started-logging.md) +* [Getting Started with Helm Charts (Scalar Manager)](./getting-started-scalar-manager.md) diff --git a/docs/3.12/helm-charts/getting-started-scalardl-auditor.md b/docs/3.12/helm-charts/getting-started-scalardl-auditor.md new file mode 100644 index 00000000..8ef9da73 --- /dev/null +++ b/docs/3.12/helm-charts/getting-started-scalardl-auditor.md @@ -0,0 +1,1009 @@ +# Getting Started with Helm Charts (ScalarDL Ledger and Auditor / Auditor mode) + +This document explains how to get started with ScalarDL Ledger and Auditor using Helm Chart on a Kubernetes cluster as a test environment. 
Here, we assume that you already have a Mac or Linux environment for testing. We use **Minikube** in this document, but the steps we will show should work in any Kubernetes cluster. + +## Requirement + +You need to subscribe to ScalarDL Ledger and ScalarDL Auditor in the [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-rzbuhxgvqf4d2) or [Azure Marketplace](https://azuremarketplace.microsoft.com/en/marketplace/apps/scalarinc.scalardb) to get the following container images. + * AWS Marketplace + * scalar-ledger + * scalar-ledger-envoy + * scalardl-schema-loader-ledger + * scalar-auditor + * scalar-auditor-envoy + * scalardl-schema-loader-auditor + * Azure Marketplace + * scalar-ledger + * scalar-auditor + * scalardl-envoy + * scalardl-schema-loader + +Please refer to the following documents for more details. + * [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md) + * [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md) + +## Note +To make Byzantine fault detection with auditing work properly, Ledger and Auditor should be deployed and managed in different administrative domains. However, in this guide, we will deploy Ledger and Auditor in the same Kubernetes cluster to make the test easier. + +## What we create + +We will deploy the following components on a Kubernetes cluster as follows. + +``` ++-----------------------------------------------------------------------------------------------------------------------------+ +| [Kubernetes Cluster] | +| [Pod] [Pod] [Pod] | +| | +| +-------+ +---------+ | +| +---> | Envoy | ---+ +---> | Ledger | ---+ | +| | +-------+ | | +---------+ | | +| | | | | | +| +---------+ | +-------+ | +-----------+ | +---------+ | +---------------+ | +| +---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | Ledger | ---+---> | PostgreSQL | | +| | | (Envoy) | | +-------+ | | (Ledger) | | +---------+ | | (For Ledger) | | +| | +---------+ | | +-----------+ | | +---------------+ | +| | | +-------+ | | +---------+ | | +| | +---> | Envoy | ---+ +---> | Ledger | ---+ | +| +--------+ | +-------+ +---------+ | +| | Client | ---+ | +| +--------+ | +-------+ +---------+ | +| | +---> | Envoy | ---+ +---> | Auditor | ---+ | +| | | +-------+ | | +---------+ | | +| | | | | | | +| | +---------+ | +-------+ | +-----------+ | +---------+ | +---------------+ | +| +---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | Auditor | ---+---> | PostgreSQL | | +| | (Envoy) | | +-------+ | | (Auditor) | | +---------+ | | (For Auditor) | | +| +---------+ | | +-----------+ | | +---------------+ | +| | +-------+ | | +---------+ | | +| +---> | Envoy | ---+ +---> | Auditor | ---+ | +| +-------+ +---------+ | +| | ++-----------------------------------------------------------------------------------------------------------------------------+ +``` + +## Step 1. Start a Kubernetes cluster + +First, you need to prepare a Kubernetes cluster. If you use a **minikube** environment, please refer to the [Getting Started with Scalar Helm Charts](./getting-started-scalar-helm-charts.md). If you have already started a Kubernetes cluster, you can skip this step. + +## Step 2. Start PostgreSQL containers + +ScalarDL Ledger and Auditor use some kind of database system as a backend database. In this document, we use PostgreSQL. 
+ +You can deploy PostgreSQL on the Kubernetes cluster as follows. + +1. Add the Bitnami helm repository. + + ```console + helm repo add bitnami https://charts.bitnami.com/bitnami + ``` + +1. Deploy PostgreSQL for Ledger. + + ```console + helm install postgresql-ledger bitnami/postgresql \ + --set auth.postgresPassword=postgres \ + --set primary.persistence.enabled=false + ``` + +1. Deploy PostgreSQL for Auditor. + + ```console + helm install postgresql-auditor bitnami/postgresql \ + --set auth.postgresPassword=postgres \ + --set primary.persistence.enabled=false + ``` + +1. Check if the PostgreSQL containers are running. + + ```console + kubectl get pod + ``` + + [Command execution result] + + ```console + NAME READY STATUS RESTARTS AGE + postgresql-auditor-0 1/1 Running 0 11s + postgresql-ledger-0 1/1 Running 0 16s + ``` + +## Step 3. Create a working directory + +We will create some configuration files and key/certificate files locally. So, create a working directory for them. + +1. Create a working directory. + + ```console + mkdir -p ~/scalardl-test/certs/ + ``` + +## Step 4. Create key/certificate files + +Note: In this guide, we will use self-sign certificates for the test. However, it is strongly recommended that these certificates NOT be used in production. + +1. Change the working directory to `~/scalardl-test/certs/` directory. + + ```console + cd ~/scalardl-test/certs/ + ``` + +1. Create a JSON file that includes Ledger information. + + ```console + cat << 'EOF' > ~/scalardl-test/certs/ledger.json + { + "CN": "ledger", + "hosts": ["example.com","*.example.com"], + "key": { + "algo": "ecdsa", + "size": 256 + }, + "names": [ + { + "O": "ledger", + "OU": "test team", + "L": "Shinjuku", + "ST": "Tokyo", + "C": "JP" + } + ] + } + EOF + ``` + +1. Create a JSON file that includes Auditor information. + + ```console + cat << 'EOF' > ~/scalardl-test/certs/auditor.json + { + "CN": "auditor", + "hosts": ["example.com","*.example.com"], + "key": { + "algo": "ecdsa", + "size": 256 + }, + "names": [ + { + "O": "auditor", + "OU": "test team", + "L": "Shinjuku", + "ST": "Tokyo", + "C": "JP" + } + ] + } + EOF + ``` + +1. Create a JSON file that includes Client information. + + ```console + cat << 'EOF' > ~/scalardl-test/certs/client.json + { + "CN": "client", + "hosts": ["example.com","*.example.com"], + "key": { + "algo": "ecdsa", + "size": 256 + }, + "names": [ + { + "O": "client", + "OU": "test team", + "L": "Shinjuku", + "ST": "Tokyo", + "C": "JP" + } + ] + } + EOF + ``` + +1. Create key/certificate files for the Ledger. + + ```console + cfssl selfsign "" ./ledger.json | cfssljson -bare ledger + ``` + +1. Create key/certificate files for the Auditor. + + ```console + cfssl selfsign "" ./auditor.json | cfssljson -bare auditor + ``` + +1. Create key/certificate files for the Client. + + ```console + cfssl selfsign "" ./client.json | cfssljson -bare client + ``` + +1. Confirm key/certificate files are created. + + ```console + ls -1 + ``` + + [Command execution result] + + ```console + auditor-key.pem + auditor.csr + auditor.json + auditor.pem + client-key.pem + client.csr + client.json + client.pem + ledger-key.pem + ledger.csr + ledger.json + ledger.pem + ``` + +# Step 5. Create DB schemas for ScalarDL Ledger using Helm Charts + +We will deploy two ScalarDL Schema Loader pods on the Kubernetes cluster using Helm Charts. +The ScalarDL Schema Loader will create the DB schemas for ScalarDL Ledger and Auditor in PostgreSQL. + +1. Change the working directory to `~/scalardl-test/`. 
+ + ```console + cd ~/scalardl-test/ + ``` + +1. Add the Scalar helm repository. + + ```console + helm repo add scalar-labs https://scalar-labs.github.io/helm-charts + ``` + +1. Create a secret resource to pull the ScalarDL container images from AWS/Azure Marketplace. + * AWS Marketplace + + ```console + kubectl create secret docker-registry reg-ecr-mp-secrets \ + --docker-server=709825985650.dkr.ecr.us-east-1.amazonaws.com \ + --docker-username=AWS \ + --docker-password=$(aws ecr get-login-password --region us-east-1) + ``` + + * Azure Marketplace + + ```console + kubectl create secret docker-registry reg-acr-secrets \ + --docker-server= \ + --docker-username= \ + --docker-password= + ``` + + Please refer to the following documents for more details. + + * [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md) + * [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md) + +1. Create a custom values file for ScalarDL Schema Loader for Ledger (schema-loader-ledger-custom-values.yaml). + * AWS Marketplace + + {% raw %} + ```console + cat << 'EOF' > ~/scalardl-test/schema-loader-ledger-custom-values.yaml + schemaLoading: + schemaType: "ledger" + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-schema-loader-ledger" + version: "3.6.0" + imagePullSecrets: + - name: "reg-ecr-mp-secrets" + databaseProperties: | + scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres + scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }} + scalar.db.storage=jdbc + secretName: "ledger-credentials-secret" + EOF + ``` + {% endraw %} + + * Azure Marketplace + + {% raw %} + ```console + cat << 'EOF' > ~/scalardl-test/schema-loader-ledger-custom-values.yaml + schemaLoading: + schemaType: "ledger" + image: + repository: "/scalarinc/scalardl-schema-loader" + version: "3.6.0" + imagePullSecrets: + - name: "reg-acr-secrets" + databaseProperties: | + scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres + scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }} + scalar.db.storage=jdbc + secretName: "ledger-credentials-secret" + EOF + ``` + {% endraw %} + +1. Create a custom values file for ScalarDL Schema Loader for Auditor (schema-loader-auditor-custom-values.yaml). 
+ * AWS Marketplace + + {% raw %} + ```console + cat << 'EOF' > ~/scalardl-test/schema-loader-auditor-custom-values.yaml + schemaLoading: + schemaType: "auditor" + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-schema-loader-auditor" + version: "3.6.0" + imagePullSecrets: + - name: "reg-ecr-mp-secrets" + databaseProperties: | + scalar.db.contact_points=jdbc:postgresql://postgresql-auditor.default.svc.cluster.local:5432/postgres + scalar.db.username={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_PASSWORD "" }} + scalar.db.storage=jdbc + secretName: "auditor-credentials-secret" + EOF + ``` + {% endraw %} + + * Azure Marketplace + + {% raw %} + ```console + cat << 'EOF' > ~/scalardl-test/schema-loader-auditor-custom-values.yaml + schemaLoading: + schemaType: "auditor" + image: + repository: "/scalarinc/scalardl-schema-loader" + version: "3.6.0" + imagePullSecrets: + - name: "reg-acr-secrets" + databaseProperties: | + scalar.db.contact_points=jdbc:postgresql://postgresql-auditor.default.svc.cluster.local:5432/postgres + scalar.db.username={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_PASSWORD "" }} + scalar.db.storage=jdbc + secretName: "auditor-credentials-secret" + EOF + ``` + {% endraw %} + +1. Create a secret resource that includes a username and password for PostgreSQL for Ledger. + + ```console + kubectl create secret generic ledger-credentials-secret \ + --from-literal=SCALAR_DL_LEDGER_POSTGRES_USERNAME=postgres \ + --from-literal=SCALAR_DL_LEDGER_POSTGRES_PASSWORD=postgres + ``` + +1. Create a secret resource that includes a username and password for PostgreSQL for Auditor. + + ```console + kubectl create secret generic auditor-credentials-secret \ + --from-literal=SCALAR_DL_AUDITOR_POSTGRES_USERNAME=postgres \ + --from-literal=SCALAR_DL_AUDITOR_POSTGRES_PASSWORD=postgres + ``` + +1. Deploy the ScalarDL Schema Loader for Ledger. + + ```console + helm install schema-loader-ledger scalar-labs/schema-loading -f ./schema-loader-ledger-custom-values.yaml + ``` + +1. Deploy the ScalarDL Schema Loader for Auditor. + + ```console + helm install schema-loader-auditor scalar-labs/schema-loading -f ./schema-loader-auditor-custom-values.yaml + ``` + +1. Check if the ScalarDL Schema Loader pods are deployed and completed. + + ```console + kubectl get pod + ``` + + [Command execution result] + + ```console + NAME READY STATUS RESTARTS AGE + postgresql-auditor-0 1/1 Running 0 2m56s + postgresql-ledger-0 1/1 Running 0 3m1s + schema-loader-auditor-schema-loading-dvc5r 0/1 Completed 0 6s + schema-loader-ledger-schema-loading-mtllb 0/1 Completed 0 10s + ``` + + If the ScalarDL Schema Loader pods are **ContainerCreating** or **Running**, wait for the process will be completed (The STATUS will be **Completed**). + +## Step 6. Deploy ScalarDL Ledger and Auditor on the Kubernetes cluster using Helm Charts + +1. Create a custom values file for ScalarDL Ledger (scalardl-ledger-custom-values.yaml). 
+ * AWS Marketplace + + {% raw %} + ```console + cat << 'EOF' > ~/scalardl-test/scalardl-ledger-custom-values.yaml + envoy: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-ledger-envoy" + version: "1.3.0" + imagePullSecrets: + - name: "reg-ecr-mp-secrets" + + ledger: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-ledger" + version: "3.6.0" + imagePullSecrets: + - name: "reg-ecr-mp-secrets" + ledgerProperties: | + scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres + scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }} + scalar.db.storage=jdbc + scalar.dl.ledger.proof.enabled=true + scalar.dl.ledger.auditor.enabled=true + scalar.dl.ledger.proof.private_key_path=/keys/private-key + secretName: "ledger-credentials-secret" + extraVolumes: + - name: "ledger-keys" + secret: + secretName: "ledger-keys" + extraVolumeMounts: + - name: "ledger-keys" + mountPath: "/keys" + readOnly: true + EOF + ``` + {% endraw %} + + * Azure Marketplace + + {% raw %} + ```console + cat << 'EOF' > ~/scalardl-test/scalardl-ledger-custom-values.yaml + envoy: + image: + repository: "/scalarinc/scalardl-envoy" + version: "1.3.0" + imagePullSecrets: + - name: "reg-acr-secrets" + + ledger: + image: + repository: "/scalarinc/scalar-ledger" + version: "3.6.0" + imagePullSecrets: + - name: "reg-acr-secrets" + ledgerProperties: | + scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres + scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }} + scalar.db.storage=jdbc + scalar.dl.ledger.proof.enabled=true + scalar.dl.ledger.proof.private_key_path=/keys/private-key + secretName: "ledger-credentials-secret" + extraVolumes: + - name: "ledger-keys" + secret: + secretName: "ledger-keys" + extraVolumeMounts: + - name: "ledger-keys" + mountPath: "/keys" + readOnly: true + EOF + ``` + {% endraw %} + +1. Create a custom values file for ScalarDL Auditor (scalardl-auditor-custom-values.yaml). 
+ * AWS Marketplace + + {% raw %} + ```console + cat << 'EOF' > ~/scalardl-test/scalardl-auditor-custom-values.yaml + envoy: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-auditor-envoy" + version: "1.3.0" + imagePullSecrets: + - name: "reg-ecr-mp-secrets" + + auditor: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-auditor" + version: "3.6.0" + imagePullSecrets: + - name: "reg-ecr-mp-secrets" + auditorProperties: | + scalar.db.contact_points=jdbc:postgresql://postgresql-auditor.default.svc.cluster.local:5432/postgres + scalar.db.username={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_PASSWORD "" }} + scalar.db.storage=jdbc + scalar.dl.auditor.ledger.host=scalardl-ledger-envoy.default.svc.cluster.local + scalar.dl.auditor.cert_path=/keys/certificate + scalar.dl.auditor.private_key_path=/keys/private-key + secretName: "auditor-credentials-secret" + extraVolumes: + - name: "auditor-keys" + secret: + secretName: "auditor-keys" + extraVolumeMounts: + - name: "auditor-keys" + mountPath: "/keys" + readOnly: true + EOF + ``` + {% endraw %} + + * Azure Marketplace + + {% raw %} + ```console + cat << 'EOF' > ~/scalardl-test/scalardl-auditor-custom-values.yaml + envoy: + image: + repository: "/scalarinc/scalardl-envoy" + version: "1.3.0" + imagePullSecrets: + - name: "reg-acr-secrets" + + auditor: + image: + repository: "/scalarinc/scalar-auditor" + version: "3.6.0" + imagePullSecrets: + - name: "reg-acr-secrets" + auditorProperties: | + scalar.db.contact_points=jdbc:postgresql://postgresql-auditor.default.svc.cluster.local:5432/postgres + scalar.db.username={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DL_AUDITOR_POSTGRES_PASSWORD "" }} + scalar.db.storage=jdbc + scalar.dl.auditor.ledger.host=scalardl-ledger-envoy.default.svc.cluster.local + scalar.dl.auditor.cert_path=/keys/certificate + scalar.dl.auditor.private_key_path=/keys/private-key + secretName: "auditor-credentials-secret" + extraVolumes: + - name: "auditor-keys" + secret: + secretName: "auditor-keys" + extraVolumeMounts: + - name: "auditor-keys" + mountPath: "/keys" + readOnly: true + EOF + ``` + {% endraw %} + +1. Create secret resource `ledger-keys`. + + ```console + kubectl create secret generic ledger-keys --from-file=certificate=./certs/ledger.pem --from-file=private-key=./certs/ledger-key.pem + ``` + +1. Create secret resource `auditor-keys`. + + ```console + kubectl create secret generic auditor-keys --from-file=certificate=./certs/auditor.pem --from-file=private-key=./certs/auditor-key.pem + ``` + +1. Deploy the ScalarDL Ledger. + + ```console + helm install scalardl-ledger scalar-labs/scalardl -f ./scalardl-ledger-custom-values.yaml + ``` + +1. Deploy the ScalarDL Auditor. + + ```console + helm install scalardl-auditor scalar-labs/scalardl-audit -f ./scalardl-auditor-custom-values.yaml + ``` + +1. Check if the ScalarDL Ledger and Auditor pods are deployed. 
+ + ```console + kubectl get pod + ``` + + [Command execution result] + + ```console + NAME READY STATUS RESTARTS AGE + postgresql-auditor-0 1/1 Running 0 14m + postgresql-ledger-0 1/1 Running 0 14m + scalardl-auditor-auditor-5b885ff4c8-fwkpf 1/1 Running 0 18s + scalardl-auditor-auditor-5b885ff4c8-g69cb 1/1 Running 0 18s + scalardl-auditor-auditor-5b885ff4c8-nsmnq 1/1 Running 0 18s + scalardl-auditor-envoy-689bcbdf65-5mn6v 1/1 Running 0 18s + scalardl-auditor-envoy-689bcbdf65-fpq8j 1/1 Running 0 18s + scalardl-auditor-envoy-689bcbdf65-lsz2t 1/1 Running 0 18s + scalardl-ledger-envoy-547bbf7546-n7p5x 1/1 Running 0 26s + scalardl-ledger-envoy-547bbf7546-p8nwp 1/1 Running 0 26s + scalardl-ledger-envoy-547bbf7546-pskpb 1/1 Running 0 26s + scalardl-ledger-ledger-6db5dc8774-5zsbj 1/1 Running 0 26s + scalardl-ledger-ledger-6db5dc8774-vnmrw 1/1 Running 0 26s + scalardl-ledger-ledger-6db5dc8774-wpjvs 1/1 Running 0 26s + schema-loader-auditor-schema-loading-dvc5r 0/1 Completed 0 11m + schema-loader-ledger-schema-loading-mtllb 0/1 Completed 0 11m + ``` + + If the ScalarDL Ledger and Auditor pods are deployed properly, you can see the STATUS are **Running**. + +1. Check if the ScalarDL Ledger and Auditor services are deployed. + + ```console + kubectl get svc + ``` + + [Command execution result] + + ```console + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + kubernetes ClusterIP 10.96.0.1 443/TCP 47d + postgresql-auditor ClusterIP 10.107.9.78 5432/TCP 15m + postgresql-auditor-hl ClusterIP None 5432/TCP 15m + postgresql-ledger ClusterIP 10.108.241.181 5432/TCP 15m + postgresql-ledger-hl ClusterIP None 5432/TCP 15m + scalardl-auditor-envoy ClusterIP 10.100.61.202 40051/TCP,40052/TCP 55s + scalardl-auditor-envoy-metrics ClusterIP 10.99.6.227 9001/TCP 55s + scalardl-auditor-headless ClusterIP None 40051/TCP,40053/TCP,40052/TCP 55s + scalardl-auditor-metrics ClusterIP 10.108.1.147 8080/TCP 55s + scalardl-ledger-envoy ClusterIP 10.101.191.116 50051/TCP,50052/TCP 61s + scalardl-ledger-envoy-metrics ClusterIP 10.106.52.103 9001/TCP 61s + scalardl-ledger-headless ClusterIP None 50051/TCP,50053/TCP,50052/TCP 61s + scalardl-ledger-metrics ClusterIP 10.99.122.106 8080/TCP 61s + ``` + + If the ScalarDL Ledger and Auditor services are deployed properly, you can see private IP addresses in the CLUSTER-IP column. (Note: `scalardl-ledger-headless` and `scalardl-auditor-headless` have no CLUSTER-IP.) + +## Step 7. Start a Client container + +We will use certificate files in a Client container. So, we create a secret resource and mount it to a Client container. + +1. Create secret resource `client-keys`. + + ```console + kubectl create secret generic client-keys --from-file=certificate=./certs/client.pem --from-file=private-key=./certs/client-key.pem + ``` + +1. Start a Client container on the Kubernetes cluster. + + ```console + cat << 'EOF' | kubectl apply -f - + apiVersion: v1 + kind: Pod + metadata: + name: "scalardl-client" + spec: + containers: + - name: scalardl-client + image: eclipse-temurin:8 + command: ['sleep'] + args: ['inf'] + volumeMounts: + - name: "ledger-keys" + mountPath: "/keys/ledger" + readOnly: true + - name: "auditor-keys" + mountPath: "/keys/auditor" + readOnly: true + - name: "client-keys" + mountPath: "/keys/client" + readOnly: true + volumes: + - name: "ledger-keys" + secret: + secretName: "ledger-keys" + - name: "auditor-keys" + secret: + secretName: "auditor-keys" + - name: "client-keys" + secret: + secretName: "client-keys" + restartPolicy: Never + EOF + ``` + +1. 
Check if the Client container is running. + + ```console + kubectl get pod scalardl-client + ``` + + [Command execution result] + + ```console + NAME READY STATUS RESTARTS AGE + scalardl-client 1/1 Running 0 4s + ``` + +## Step 8. Run ScalarDL sample contracts in the Client container + +The following explains the minimum steps. If you want to know more details about ScalarDL Ledger and Auditor, please refer to the following documents. + * [Getting Started with ScalarDL](https://github.com/scalar-labs/scalardl/blob/master/docs/getting-started.md) + * [Getting Started with ScalarDL Auditor](https://github.com/scalar-labs/scalardl/blob/master/docs/getting-started-auditor.md) + +When you use Auditor, you need to register the certificate for the Ledger and Auditor before starting the client application. Ledger needs to register its certificate to Auditor, and Auditor needs to register its certificate to Ledger. + +1. Run bash in the Client container. + + ```console + kubectl exec -it scalardl-client -- bash + ``` + + After this step, run each command in the Client container. + +1. Install the git, curl and unzip commands in the Client container. + + ```console + apt update && apt install -y git curl unzip + ``` + +1. Clone ScalarDL Java Client SDK git repository. + + ```console + git clone https://github.com/scalar-labs/scalardl-java-client-sdk.git + ``` + +1. Change the directory to `scalardl-java-client-sdk/`. + + ```console + cd scalardl-java-client-sdk/ + ``` + + ```console + pwd + ``` + + [Command execution result] + + ```console + /scalardl-java-client-sdk + ``` + +1. Change branch to arbitrary version. + + ```console + git checkout -b v3.6.0 refs/tags/v3.6.0 + ``` + + ```console + git branch + ``` + + [Command execution result] + + ```console + master + * v3.6.0 + ``` + + If you want to use another version, please specify the version (tag) you want to use. You need to use the same version of ScalarDL Ledger and ScalarDL Java Client SDK. + +1. Build the sample contracts. + + ```console + ./gradlew assemble + ``` + +1. Download CLI tools of ScalarDL from [ScalarDL Java Client SDK Releases](https://github.com/scalar-labs/scalardl-java-client-sdk/releases). + + ```console + curl -OL https://github.com/scalar-labs/scalardl-java-client-sdk/releases/download/v3.6.0/scalardl-java-client-sdk-3.6.0.zip + ``` + + You need to use the same version of CLI tools and ScalarDL Ledger. + +1. Unzip the `scalardl-java-client-sdk-3.6.0.zip` file. + + ```console + unzip ./scalardl-java-client-sdk-3.6.0.zip + ``` + +1. Create a configuration file (ledger.as.client.properties) to register the certificate of Ledger to Auditor. + + ```console + cat << 'EOF' > ledger.as.client.properties + # Ledger + scalar.dl.client.server.host=scalardl-ledger-envoy.default.svc.cluster.local + + # Auditor + scalar.dl.client.auditor.enabled=true + scalar.dl.client.auditor.host=scalardl-auditor-envoy.default.svc.cluster.local + + # Certificate + scalar.dl.client.cert_holder_id=ledger + scalar.dl.client.cert_path=/keys/ledger/certificate + scalar.dl.client.private_key_path=/keys/ledger/private-key + EOF + ``` + +1. Create a configuration file (auditor.as.client.properties) to register the certificate of Auditor to Ledger. 
+ + ```console + cat << 'EOF' > auditor.as.client.properties + # Ledger + scalar.dl.client.server.host=scalardl-ledger-envoy.default.svc.cluster.local + + # Auditor + scalar.dl.client.auditor.enabled=true + scalar.dl.client.auditor.host=scalardl-auditor-envoy.default.svc.cluster.local + + # Certificate + scalar.dl.client.cert_holder_id=auditor + scalar.dl.client.cert_path=/keys/auditor/certificate + scalar.dl.client.private_key_path=/keys/auditor/private-key + EOF + ``` + +1. Create a configuration file (client.properties) to access ScalarDL Ledger on the Kubernetes cluster. + + ```console + cat << 'EOF' > client.properties + # Ledger + scalar.dl.client.server.host=scalardl-ledger-envoy.default.svc.cluster.local + + # Auditor + scalar.dl.client.auditor.enabled=true + scalar.dl.client.auditor.host=scalardl-auditor-envoy.default.svc.cluster.local + + # Certificate + scalar.dl.client.cert_holder_id=client + scalar.dl.client.cert_path=/keys/client/certificate + scalar.dl.client.private_key_path=/keys/client/private-key + EOF + ``` + +1. Register the certificate file of Ledger. + + ```console + ./scalardl-java-client-sdk-3.6.0/bin/register-cert --properties ./ledger.as.client.properties + ``` + +1. Register the certificate file of Auditor. + + ```console + ./scalardl-java-client-sdk-3.6.0/bin/register-cert --properties ./auditor.as.client.properties + ``` + +1. Register the certificate file of client. + + ```console + ./scalardl-java-client-sdk-3.6.0/bin/register-cert --properties ./client.properties + ``` + +1. Register the sample contract `StateUpdater`. + + ```console + ./scalardl-java-client-sdk-3.6.0/bin/register-contract --properties ./client.properties --contract-id StateUpdater --contract-binary-name com.org1.contract.StateUpdater --contract-class-file ./build/classes/java/main/com/org1/contract/StateUpdater.class + ``` + +1. Register the sample contract `StateReader`. + + ```console + ./scalardl-java-client-sdk-3.6.0/bin/register-contract --properties ./client.properties --contract-id StateReader --contract-binary-name com.org1.contract.StateReader --contract-class-file ./build/classes/java/main/com/org1/contract/StateReader.class + ``` + +1. Register the contract `ValdateLedger` to execute a validate request. + + ```console + ./scalardl-java-client-sdk-3.6.0/bin/register-contract --properties ./client.properties --contract-id validate-ledger --contract-binary-name com.scalar.dl.client.contract.ValidateLedger --contract-class-file ./build/classes/java/main/com/scalar/dl/client/contract/ValidateLedger.class + ``` + +1. Execute the contract `StateUpdater`. + + ```console + ./scalardl-java-client-sdk-3.6.0/bin/execute-contract --properties ./client.properties --contract-id StateUpdater --contract-argument '{"asset_id": "test_asset", "state": 3}' + ``` + + This sample contract updates the `state` (value) of the asset named `test_asset` to `3`. + +1. Execute the contract `StateReader`. + + ```console + ./scalardl-java-client-sdk-3.6.0/bin/execute-contract --properties ./client.properties --contract-id StateReader --contract-argument '{"asset_id": "test_asset"}' + ``` + + [Command execution result] + + ```console + Contract result: + { + "id" : "test_asset", + "age" : 0, + "output" : { + "state" : 3 + } + } + ``` + + * Reference information + * If the asset data is not tampered with, the contract execution request (execute-contract command) returns `OK` as a result. + * If the asset data is tampered with (e.g. 
the `state` value in the DB is tampered with), the contract execution request (execute-contract command) returns a value other than `OK` (e.g. `INCONSISTENT_STATES`) as a result, like the following. + [Command execution result (If the asset data is tampered with)] + + ```console + { + "status_code" : "INCONSISTENT_STATES", + "error_message" : "The results from Ledger and Auditor don't match" + } + ``` + + * In this way, the ScalarDL can detect data tampering. + +1. Execute a validation request for the asset. + + ```console + ./scalardl-java-client-sdk-3.6.0/bin/validate-ledger --properties ./client.properties --asset-id "test_asset" + ``` + + [Command execution result] + + ```console + { + "status_code" : "OK", + "Ledger" : { + "id" : "test_asset", + "age" : 0, + "nonce" : "3533427d-03cf-41d1-bf95-4d31eb0cb24d", + "hash" : "FiquvtPMKLlxKf4VGoccSAGsi9ptn4ozYVVTwdSzEQ0=", + "signature" : "MEYCIQDiiXqzw6K+Ml4uvn8rK43o5wHWESU3hoXnZPi6/OeKVwIhAM+tFBcapl6zg47Uq0Uc8nVNGWNHZLBDBGve3F0xkzTR" + }, + "Auditor" : { + "id" : "test_asset", + "age" : 0, + "nonce" : "3533427d-03cf-41d1-bf95-4d31eb0cb24d", + "hash" : "FiquvtPMKLlxKf4VGoccSAGsi9ptn4ozYVVTwdSzEQ0=", + "signature" : "MEUCIQDLsfUR2PmxSvfpL3YvHJUkz00RDpjCdctkroZKXE8d5QIgH73FQH2e11jfnynD00Pp9DrIG1vYizxDsvxUsMPo9IU=" + } + } + ``` + + * Reference information + * If the asset data is not tampered with, the validation request (validate-ledger command) returns `OK` as a result. + * If the asset data is tampered with (e.g. the `state` value in the DB is tampered with), the validation request (validate-ledger command) returns a value other than `OK` (e.g. `INVALID_OUTPUT`) as a result, like the following. + [Command execution result (If the asset data is tampered with)] + + ```console + { + "status_code" : "INCONSISTENT_STATES", + "error_message" : "The results from Ledger and Auditor don't match" + } + ``` + + * In this way, the ScalarDL Ledger can detect data tampering. + +## Step 9. Delete all resources + +After completing the ScalarDL Ledger tests on the Kubernetes cluster, remove all resources. + +1. Uninstall ScalarDL Ledger, ScalarDL Schema Loader, and PostgreSQL. + + ```console + helm uninstall scalardl-ledger schema-loader-ledger postgresql-ledger scalardl-auditor schema-loader-auditor postgresql-auditor + ``` + +1. Remove the Client container. + + ``` + kubectl delete pod scalardl-client --force --grace-period 0 + ``` + +1. Remove the working directory and sample files (configuration file, key, and certificate). + + ```console + cd ~ + ``` + + ```console + rm -rf ~/scalardl-test/ + ``` + +## Further reading + +You can see how to get started with monitoring or logging for Scalar products in the following documents. + +* [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](./getting-started-monitoring.md) +* [Getting Started with Helm Charts (Logging using Loki Stack)](./getting-started-logging.md) +* [Getting Started with Helm Charts (Scalar Manager)](./getting-started-scalar-manager.md) diff --git a/docs/3.12/helm-charts/getting-started-scalardl-ledger.md b/docs/3.12/helm-charts/getting-started-scalardl-ledger.md new file mode 100644 index 00000000..59715b2b --- /dev/null +++ b/docs/3.12/helm-charts/getting-started-scalardl-ledger.md @@ -0,0 +1,687 @@ +# Getting Started with Helm Charts (ScalarDL Ledger / Ledger only) + +This document explains how to get started with ScalarDL Ledger using Helm Chart on a Kubernetes cluster as a test environment. 
Here, we assume that you already have a Mac or Linux environment for testing. We use **Minikube** in this document, but the steps we will show should work in any Kubernetes cluster. + +## Requirement + +You need to subscribe to ScalarDL Ledger in the [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-rzbuhxgvqf4d2) or [Azure Marketplace](https://azuremarketplace.microsoft.com/en/marketplace/apps/scalarinc.scalardb) to get the following container images. + * AWS Marketplace + * scalar-ledger + * scalar-ledger-envoy + * scalardl-schema-loader-ledger + * Azure Marketplace + * scalar-ledger + * scalardl-envoy + * scalardl-schema-loader + +Please refer to the following documents for more details. + * [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md) + * [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md) + +## What we create + +We will deploy the following components on a Kubernetes cluster as follows. + +``` ++--------------------------------------------------------------------------------------------------------------------------------------+ +| [Kubernetes Cluster] | +| | +| [Pod] [Pod] [Pod] [Pod] | +| | +| +-------+ +-----------------+ | +| +---> | Envoy | ---+ +---> | ScalarDL Ledger | ---+ | +| | +-------+ | | +-----------------+ | | +| | | | | | +| +--------+ +---------+ | +-------+ | +-------------------+ | +-----------------+ | +------------+ | +| | Client | ---> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | ScalarDL Ledger | ---+---> | PostgreSQL | | +| +--------+ | (Envoy) | | +-------+ | | (ScalarDL Ledger) | | +-----------------+ | +------------+ | +| +---------+ | | +-------------------+ | | | +| | +-------+ | | +-----------------+ | | +| +---> | Envoy | ---+ +---> | ScalarDL Ledger | ---+ | +| +-------+ +-----------------+ | +| | ++--------------------------------------------------------------------------------------------------------------------------------------+ +``` + +## Step 1. Start a Kubernetes cluster + +First, you need to prepare a Kubernetes cluster. If you use a **minikube** environment, please refer to the [Getting Started with Scalar Helm Charts](./getting-started-scalar-helm-charts.md). If you have already started a Kubernetes cluster, you can skip this step. + +## Step 2. Start a PostgreSQL container + +ScalarDL Ledger uses some kind of database system as a backend database. In this document, we use PostgreSQL. + +You can deploy PostgreSQL on the Kubernetes cluster as follows. + +1. Add the Bitnami helm repository. + + ```console + helm repo add bitnami https://charts.bitnami.com/bitnami + ``` + +1. Deploy PostgreSQL. + + ```console + helm install postgresql-ledger bitnami/postgresql \ + --set auth.postgresPassword=postgres \ + --set primary.persistence.enabled=false + ``` + +1. Check if the PostgreSQL container is running. + + ```console + kubectl get pod + ``` + + [Command execution result] + + ```console + NAME READY STATUS RESTARTS AGE + postgresql-ledger-0 1/1 Running 0 11s + ``` + +## Step 3. Create a working directory + +We will create some configuration files and key/certificate files locally. So, create a working directory for them. + +1. Create a working directory. + + ```console + mkdir -p ~/scalardl-test/certs/ + ``` + +## Step 4. 
Create key/certificate files + +Note: In this guide, we will use self-sign certificates for the test. However, it is strongly recommended that these certificates NOT be used in production. + +1. Change the working directory to `~/scalardl-test/certs/` directory. + + ```console + cd ~/scalardl-test/certs/ + ``` + +1. Create a JSON file that includes Ledger information. + + ```console + cat << 'EOF' > ~/scalardl-test/certs/ledger.json + { + "CN": "ledger", + "hosts": ["example.com","*.example.com"], + "key": { + "algo": "ecdsa", + "size": 256 + }, + "names": [ + { + "O": "ledger", + "OU": "test team", + "L": "Shinjuku", + "ST": "Tokyo", + "C": "JP" + } + ] + } + EOF + ``` + +1. Create a JSON file that includes Client information. + + ```console + cat << 'EOF' > ~/scalardl-test/certs/client.json + { + "CN": "client", + "hosts": ["example.com","*.example.com"], + "key": { + "algo": "ecdsa", + "size": 256 + }, + "names": [ + { + "O": "client", + "OU": "test team", + "L": "Shinjuku", + "ST": "Tokyo", + "C": "JP" + } + ] + } + EOF + ``` + +1. Create key/certificate files for the Ledger. + + ```console + cfssl selfsign "" ./ledger.json | cfssljson -bare ledger + ``` + +1. Create key/certificate files for the Client. + + ```console + cfssl selfsign "" ./client.json | cfssljson -bare client + ``` + +1. Confirm key/certificate files are created. + + ```console + ls -1 + ``` + + [Command execution result] + + ```console + client-key.pem + client.csr + client.json + client.pem + ledger-key.pem + ledger.csr + ledger.json + ledger.pem + ``` + +## Step 5. Create DB schemas for ScalarDL Ledger using Helm Charts + +We will deploy a ScalarDL Schema Loader on the Kubernetes cluster using Helm Charts. +The ScalarDL Schema Loader will create the DB schemas for ScalarDL Ledger in PostgreSQL. + +1. Change the working directory to `~/scalardl-test/`. + + ```console + cd ~/scalardl-test/ + ``` + +1. Add the Scalar helm repository. + + ```console + helm repo add scalar-labs https://scalar-labs.github.io/helm-charts + ``` + +1. Create a secret resource to pull the ScalarDL container images from AWS/Azure Marketplace. + * AWS Marketplace + + ```console + kubectl create secret docker-registry reg-ecr-mp-secrets \ + --docker-server=709825985650.dkr.ecr.us-east-1.amazonaws.com \ + --docker-username=AWS \ + --docker-password=$(aws ecr get-login-password --region us-east-1) + ``` + + * Azure Marketplace + + ```console + kubectl create secret docker-registry reg-acr-secrets \ + --docker-server= \ + --docker-username= \ + --docker-password= + ``` + + Please refer to the following documents for more details. + + * [How to install Scalar products through AWS Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md) + * [How to install Scalar products through Azure Marketplace](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md) + +1. Create a custom values file for ScalarDL Schema Loader (schema-loader-ledger-custom-values.yaml). 
+ * AWS Marketplace + + {% raw %} + ```console + cat << 'EOF' > ~/scalardl-test/schema-loader-ledger-custom-values.yaml + schemaLoading: + schemaType: "ledger" + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-schema-loader-ledger" + version: "3.6.0" + imagePullSecrets: + - name: "reg-ecr-mp-secrets" + databaseProperties: | + scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres + scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }} + scalar.db.storage=jdbc + secretName: "ledger-credentials-secret" + EOF + ``` + {% endraw %} + * Azure Marketplace + + {% raw %} + ```console + cat << 'EOF' > ~/scalardl-test/schema-loader-ledger-custom-values.yaml + schemaLoading: + schemaType: "ledger" + image: + repository: "/scalarinc/scalardl-schema-loader" + version: "3.6.0" + imagePullSecrets: + - name: "reg-acr-secrets" + databaseProperties: | + scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres + scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }} + scalar.db.storage=jdbc + secretName: "ledger-credentials-secret" + EOF + ``` + {% endraw %} + +1. Create a secret resource that includes a username and password for PostgreSQL. + + ```console + kubectl create secret generic ledger-credentials-secret \ + --from-literal=SCALAR_DL_LEDGER_POSTGRES_USERNAME=postgres \ + --from-literal=SCALAR_DL_LEDGER_POSTGRES_PASSWORD=postgres + ``` + +1. Deploy the ScalarDL Schema Loader. + + ```console + helm install schema-loader-ledger scalar-labs/schema-loading -f ./schema-loader-ledger-custom-values.yaml + ``` + +1. Check if the ScalarDL Schema Loader pod is deployed and completed. + + ```console + kubectl get pod + ``` + + [Command execution result] + + ```console + NAME READY STATUS RESTARTS AGE + postgresql-ledger-0 1/1 Running 0 11m + schema-loader-ledger-schema-loading-46rcr 0/1 Completed 0 3s + ``` + + If the ScalarDL Schema Loader pod is **ContainerCreating** or **Running**, wait for the process will be completed (The STATUS will be **Completed**). + +## Step 6. Deploy ScalarDL Ledger on the Kubernetes cluster using Helm Charts + +1. Create a custom values file for ScalarDL Ledger (scalardl-ledger-custom-values.yaml). 
+ * AWS Marketplace + + {% raw %} + ```console + cat << 'EOF' > ~/scalardl-test/scalardl-ledger-custom-values.yaml + envoy: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-ledger-envoy" + version: "1.3.0" + imagePullSecrets: + - name: "reg-ecr-mp-secrets" + + ledger: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-ledger" + version: "3.6.0" + imagePullSecrets: + - name: "reg-ecr-mp-secrets" + ledgerProperties: | + scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres + scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }} + scalar.db.storage=jdbc + scalar.dl.ledger.proof.enabled=true + scalar.dl.ledger.proof.private_key_path=/keys/private-key + secretName: "ledger-credentials-secret" + extraVolumes: + - name: "ledger-keys" + secret: + secretName: "ledger-keys" + extraVolumeMounts: + - name: "ledger-keys" + mountPath: "/keys" + readOnly: true + EOF + ``` + {% endraw %} + + * Azure Marketplace + + {% raw %} + ```console + cat << 'EOF' > ~/scalardl-test/scalardl-ledger-custom-values.yaml + envoy: + image: + repository: "/scalarinc/scalardl-envoy" + version: "1.3.0" + imagePullSecrets: + - name: "reg-acr-secrets" + + ledger: + image: + repository: "/scalarinc/scalar-ledger" + version: "3.6.0" + imagePullSecrets: + - name: "reg-acr-secrets" + ledgerProperties: | + scalar.db.contact_points=jdbc:postgresql://postgresql-ledger.default.svc.cluster.local:5432/postgres + scalar.db.username={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DL_LEDGER_POSTGRES_PASSWORD "" }} + scalar.db.storage=jdbc + scalar.dl.ledger.proof.enabled=true + scalar.dl.ledger.proof.private_key_path=/keys/private-key + secretName: "ledger-credentials-secret" + extraVolumes: + - name: "ledger-keys" + secret: + secretName: "ledger-keys" + extraVolumeMounts: + - name: "ledger-keys" + mountPath: "/keys" + readOnly: true + EOF + ``` + {% endraw %} + +1. Create secret resource `ledger-keys`. + + ```console + kubectl create secret generic ledger-keys --from-file=private-key=./certs/ledger-key.pem + ``` + +1. Deploy the ScalarDL Ledger. + + ```console + helm install scalardl-ledger scalar-labs/scalardl -f ./scalardl-ledger-custom-values.yaml + ``` + +1. Check if the ScalarDL Ledger pods are deployed. + + ```console + kubectl get pod + ``` + + [Command execution result] + + ```console + NAME READY STATUS RESTARTS AGE + postgresql-ledger-0 1/1 Running 0 14m + scalardl-ledger-envoy-547bbf7546-6cn88 1/1 Running 0 52s + scalardl-ledger-envoy-547bbf7546-rpg5p 1/1 Running 0 52s + scalardl-ledger-envoy-547bbf7546-x2vlg 1/1 Running 0 52s + scalardl-ledger-ledger-9bdf7f8bd-29bzm 1/1 Running 0 52s + scalardl-ledger-ledger-9bdf7f8bd-9fklw 1/1 Running 0 52s + scalardl-ledger-ledger-9bdf7f8bd-9tw5x 1/1 Running 0 52s + schema-loader-ledger-schema-loading-46rcr 0/1 Completed 0 3m38s + ``` + + If the ScalarDL Ledger pods are deployed properly, you can see the STATUS are **Running**. + +1. Check if the ScalarDL Ledger services are deployed. 
+ + ```console + kubectl get svc + ``` + + [Command execution result] + + ```console + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + kubernetes ClusterIP 10.96.0.1 443/TCP 47d + postgresql-ledger ClusterIP 10.109.253.150 5432/TCP 15m + postgresql-ledger-hl ClusterIP None 5432/TCP 15m + scalardl-ledger-envoy ClusterIP 10.106.141.153 50051/TCP,50052/TCP 83s + scalardl-ledger-envoy-metrics ClusterIP 10.108.36.136 9001/TCP 83s + scalardl-ledger-headless ClusterIP None 50051/TCP,50053/TCP,50052/TCP 83s + scalardl-ledger-metrics ClusterIP 10.98.4.217 8080/TCP 83s + ``` + + If the ScalarDL Ledger services are deployed properly, you can see private IP addresses in the CLUSTER-IP column. (Note: `scalardl-ledger-headless` has no CLUSTER-IP.) + +## Step 7. Start a Client container + +We will use certificate files in a Client container. So, we create a secret resource and mount it to a Client container. + +1. Create secret resource `client-keys`. + + ```console + kubectl create secret generic client-keys --from-file=certificate=./certs/client.pem --from-file=private-key=./certs/client-key.pem + ``` + +1. Start a Client container on the Kubernetes cluster. + + ```console + cat << 'EOF' | kubectl apply -f - + apiVersion: v1 + kind: Pod + metadata: + name: "scalardl-client" + spec: + containers: + - name: scalardl-client + image: eclipse-temurin:8 + command: ['sleep'] + args: ['inf'] + volumeMounts: + - name: "client-keys" + mountPath: "/keys" + readOnly: true + volumes: + - name: "client-keys" + secret: + secretName: "client-keys" + restartPolicy: Never + EOF + ``` + +1. Check if the Client container is running. + + ```console + kubectl get pod scalardl-client + ``` + + [Command execution result] + + ```console + NAME READY STATUS RESTARTS AGE + scalardl-client 1/1 Running 0 11s + ``` + +## Step 8. Run ScalarDL sample contracts in the Client container + +The following explains the minimum steps. If you want to know more details about ScalarDL and the contract, please refer to the [Getting Started with ScalarDL](https://github.com/scalar-labs/scalardl/blob/master/docs/getting-started.md). + +1. Run bash in the Client container. + + ```console + kubectl exec -it scalardl-client -- bash + ``` + + After this step, run each command in the Client container. + +1. Install the git, curl and unzip commands in the Client container. + + ```console + apt update && apt install -y git curl unzip + ``` + +1. Clone ScalarDL Java Client SDK git repository. + + ```console + git clone https://github.com/scalar-labs/scalardl-java-client-sdk.git + ``` + +1. Change the directory to `scalardl-java-client-sdk/`. + + ```console + cd scalardl-java-client-sdk/ + ``` + + ```console + pwd + ``` + + [Command execution result] + + ```console + /scalardl-java-client-sdk + ``` + +1. Change branch to arbitrary version. + + ```console + git checkout -b v3.6.0 refs/tags/v3.6.0 + ``` + + ```console + git branch + ``` + + [Command execution result] + ```console + master + * v3.6.0 + ``` + + If you want to use another version, please specify the version (tag) you want to use. You need to use the same version of ScalarDL Ledger and ScalarDL Java Client SDK. + +1. Build the sample contracts. + + ```console + ./gradlew assemble + ``` + +1. Download CLI tools of ScalarDL from [ScalarDL Java Client SDK Releases](https://github.com/scalar-labs/scalardl-java-client-sdk/releases). 
+ + ```console + curl -OL https://github.com/scalar-labs/scalardl-java-client-sdk/releases/download/v3.6.0/scalardl-java-client-sdk-3.6.0.zip + ``` + + You need to use the same version of CLI tools and ScalarDL Ledger. + +1. Unzip the `scalardl-java-client-sdk-3.6.0.zip` file. + + ```console + unzip ./scalardl-java-client-sdk-3.6.0.zip + ``` + +1. Create a configuration file (client.properties) to access ScalarDL Ledger on the Kubernetes cluster. + + ```console + cat << 'EOF' > client.properties + scalar.dl.client.server.host=scalardl-ledger-envoy.default.svc.cluster.local + scalar.dl.client.cert_holder_id=client + scalar.dl.client.cert_path=/keys/certificate + scalar.dl.client.private_key_path=/keys/private-key + EOF + ``` + +1. Register the certificate file of the client. + ```console + ./scalardl-java-client-sdk-3.6.0/bin/register-cert --properties ./client.properties + ``` + +1. Register the sample contract `StateUpdater`. + + ```console + ./scalardl-java-client-sdk-3.6.0/bin/register-contract --properties ./client.properties --contract-id StateUpdater --contract-binary-name com.org1.contract.StateUpdater --contract-class-file ./build/classes/java/main/com/org1/contract/StateUpdater.class + ``` + +1. Register the sample contract `StateReader`. + + ```console + ./scalardl-java-client-sdk-3.6.0/bin/register-contract --properties ./client.properties --contract-id StateReader --contract-binary-name com.org1.contract.StateReader --contract-class-file ./build/classes/java/main/com/org1/contract/StateReader.class + ``` + +1. Execute the contract `StateUpdater`. + + ```console + ./scalardl-java-client-sdk-3.6.0/bin/execute-contract --properties ./client.properties --contract-id StateUpdater --contract-argument '{"asset_id": "test_asset", "state": 3}' + ``` + This sample contract updates the `state` (value) of the asset named `test_asset` to `3`. + +1. Execute the contract `StateReader`. + + ```console + ./scalardl-java-client-sdk-3.6.0/bin/execute-contract --properties ./client.properties --contract-id StateReader --contract-argument '{"asset_id": "test_asset"}' + ``` + + [Command execution result] + + ```console + Contract result: + { + "id" : "test_asset", + "age" : 0, + "output" : { + "state" : 3 + } + } + ``` + +1. Execute a validation request for the asset. + + ```console + ./scalardl-java-client-sdk-3.6.0/bin/validate-ledger --properties ./client.properties --asset-id "test_asset" + ``` + + [Command execution result] + + ```console + { + "status_code" : "OK", + "Ledger" : { + "id" : "test_asset", + "age" : 0, + "nonce" : "f31599c6-e6b9-4b77-adc3-61cb5f119bd3", + "hash" : "9ExfFl5Lg9IQwdXdW9b87Bi+PWccn3OSNRbhmI/dboo=", + "signature" : "MEQCIG6Xa4WOWGMIIbA3PnCje4aAapYfCMerF54xRW0gaUuzAiBCA1nCAPoFWgxArB34/u9b+KeoxQBMALI/pOzMNoLExg==" + }, + "Auditor" : null + } + ``` + + * Reference information + * If the asset data is not tampered with, the validation request (validate-ledger command) returns `OK` as a result. + * If the asset data is tampered with (e.g. the `state` value in the DB is tampered with), the validation request (validate-ledger command) returns a value other than `OK` (e.g. `INVALID_OUTPUT`) as a result, like the following. 
+ [Command execution result (If the asset data is tampered with)] + + ```console + { + "status_code" : "INVALID_OUTPUT", + "Ledger" : { + "id" : "test_asset", + "age" : 0, + "nonce" : "f31599c6-e6b9-4b77-adc3-61cb5f119bd3", + "hash" : "9ExfFl5Lg9IQwdXdW9b87Bi+PWccn3OSNRbhmI/dboo=", + "signature" : "MEQCIGtJerW7N93c/bvIBy/7NXxoQwGFznHMmV6RzsgHQg0dAiBu+eBxkfmMQKJY2d9fLNvCH+4b+9rl7gZ3OXJ2NYeVsA==" + }, + "Auditor" : null + } + ``` + + * In this way, the ScalarDL Ledger can detect data tampering. + +## Step 9. Delete all resources + +After completing the ScalarDL Ledger tests on the Kubernetes cluster, remove all resources. + +1. Uninstall ScalarDL Ledger, ScalarDL Schema Loader, and PostgreSQL. + + ```console + helm uninstall scalardl-ledger schema-loader-ledger postgresql-ledger + ``` + +1. Remove the Client container. + + ``` + kubectl delete pod scalardl-client --force --grace-period 0 + ``` + +1. Remove the working directory and sample files (configuration file, key, and certificate). + + ```console + cd ~ + ``` + + ```console + rm -rf ~/scalardl-test/ + ``` + +## Further reading + +You can see how to get started with monitoring or logging for Scalar products in the following documents. + +* [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](./getting-started-monitoring.md) +* [Getting Started with Helm Charts (Logging using Loki Stack)](./getting-started-logging.md) +* [Getting Started with Helm Charts (Scalar Manager)](./getting-started-scalar-manager.md) diff --git a/docs/3.12/helm-charts/how-to-deploy-scalar-admin-for-kubernetes.md b/docs/3.12/helm-charts/how-to-deploy-scalar-admin-for-kubernetes.md new file mode 100644 index 00000000..20b9d06d --- /dev/null +++ b/docs/3.12/helm-charts/how-to-deploy-scalar-admin-for-kubernetes.md @@ -0,0 +1,27 @@ +# How to deploy Scalar Admin for Kubernetes + +This document explains how to deploy Scalar Admin for Kubernetes by using Scalar Helm Charts. For details on the custom values file for Scalar Admin for Kubernetes, see [Configure a custom values file for Scalar Admin for Kubernetes](./configure-custom-values-scalar-admin-for-kubernetes.md). + +## Deploy Scalar Admin for Kubernetes + +To deploy Scalar Admin for Kubernetes, run the following command, replacing the contents in the angle brackets as described: + +```console +helm install scalar-labs/scalar-admin-for-kubernetes -n -f / --version +``` + +## Upgrade a Scalar Admin for Kubernetes job + +To upgrade a Scalar Admin for Kubernetes job, run the following command, replacing the contents in the angle brackets as described: + +```console +helm upgrade scalar-labs/scalar-admin-for-kubernetes -n -f / --version +``` + +## Delete a Scalar Admin for Kubernetes job + +To delete a Scalar Admin for Kubernetes job, run the following command, replacing the contents in the angle brackets as described: + +```console +helm uninstall -n +``` diff --git a/docs/3.12/helm-charts/how-to-deploy-scalar-manager.md b/docs/3.12/helm-charts/how-to-deploy-scalar-manager.md new file mode 100644 index 00000000..9e592d40 --- /dev/null +++ b/docs/3.12/helm-charts/how-to-deploy-scalar-manager.md @@ -0,0 +1,46 @@ +# How to deploy Scalar Manager + +This document explains how to deploy Scalar Manager using Scalar Helm Charts. You must prepare your custom values file. Please refer to the following document for more details on the custom values file for Scalar Manager. 
+
+* [Configure a custom values file for Scalar Manager](./configure-custom-values-scalar-manager.md)
+
+## Deploy kube-prometheus-stack and loki-stack
+
+When you use Scalar Manager, you must deploy kube-prometheus-stack and loki-stack. Please refer to the following documents for more details on how to deploy them.
+
+* [Getting Started with Helm Charts (Monitoring using Prometheus Operator)](https://github.com/scalar-labs/helm-charts/blob/main/docs/getting-started-monitoring.md)
+* [Getting Started with Helm Charts (Logging using Loki Stack)](https://github.com/scalar-labs/helm-charts/blob/main/docs/getting-started-logging.md)
+
+When you deploy kube-prometheus-stack, you must set the following configuration in the custom values file for kube-prometheus-stack.
+
+```yaml
+grafana:
+  grafana.ini:
+    security:
+      allow_embedding: true
+      cookie_samesite: disabled
+```
+
+If you already have a deployment of kube-prometheus-stack, please upgrade the configuration using the following command.
+
+```console
+helm upgrade prometheus-community/kube-prometheus-stack -n -f / --version
+```
+
+## Deploy Scalar Manager
+
+```console
+helm install scalar-labs/scalar-manager -n -f / --version
+```
+
+## Upgrade the deployment of Scalar Manager
+
+```console
+helm upgrade scalar-labs/scalar-manager -n -f / --version
+```
+
+## Delete the deployment of Scalar Manager
+
+```console
+helm uninstall -n
+```
diff --git a/docs/3.12/helm-charts/how-to-deploy-scalar-products.md b/docs/3.12/helm-charts/how-to-deploy-scalar-products.md
new file mode 100644
index 00000000..ba50c2a6
--- /dev/null
+++ b/docs/3.12/helm-charts/how-to-deploy-scalar-products.md
@@ -0,0 +1,60 @@
+# Deploy Scalar products using Scalar Helm Charts
+
+This document explains how to deploy Scalar products using Scalar Helm Charts. If you want to test Scalar products in your local environment using a minikube cluster, please refer to the following getting started guide.
+
+* [Getting Started with Scalar Helm Charts](./getting-started-scalar-helm-charts.md)
+
+## Prerequisites
+
+### Install the helm command
+
+You must install the helm command to use Scalar Helm Charts. Please install the helm command according to the [Helm document](https://helm.sh/docs/intro/install/).
+
+### Add the Scalar Helm Charts repository
+
+```console
+helm repo add scalar-labs https://scalar-labs.github.io/helm-charts
+```
+```console
+helm repo update scalar-labs
+```
+
+### Prepare a Kubernetes cluster
+
+You must prepare a Kubernetes cluster for the deployment of Scalar products. If you use EKS (Amazon Elastic Kubernetes Service) or AKS (Azure Kubernetes Service) in a production environment, please refer to the following document for more details.
+
+* [scalar-labs/scalar-kubernetes](https://github.com/scalar-labs/scalar-kubernetes/blob/master/README.md)
+
+You must prepare a supported version of Kubernetes. For versions that Scalar Helm Charts supports, see [Supported Kubernetes versions](https://github.com/scalar-labs/helm-charts#supported-kubernetes-versions).
+
+### Prepare a database (ScalarDB, ScalarDL Ledger, ScalarDL Auditor)
+
+You must prepare a database as the backend storage for ScalarDB/ScalarDL. You can see the databases supported by ScalarDB/ScalarDL in the following document.
+
+* [ScalarDB Supported Databases](https://github.com/scalar-labs/scalardb/blob/master/docs/scalardb-supported-databases.md)
+
+### Prepare a custom values file
+
+You must prepare your custom values file based on your environment.
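For illustration, a minimal custom values file for ScalarDL Ledger might look like the following sketch. The keys mirror the `scalardl-ledger-custom-values.yaml` examples used in the getting-started guides above; the `<YOUR_CONTAINER_REGISTRY>` and `<YOUR_POSTGRESQL_HOST>` placeholders and the version shown are assumptions that you must replace with values for your environment.

```yaml
# A minimal sketch of a custom values file; all values are placeholders.
ledger:
  image:
    repository: "<YOUR_CONTAINER_REGISTRY>/scalar-ledger"   # replace with the repository you can pull from
    version: "3.6.0"                                        # replace with the version you use
  ledgerProperties: |
    scalar.db.contact_points=jdbc:postgresql://<YOUR_POSTGRESQL_HOST>:5432/postgres
    scalar.db.storage=jdbc
  secretName: "ledger-credentials-secret"                   # Secret that provides the database credentials
```

In practice, you would also set keys such as `imagePullSecrets` and the Envoy settings shown in the getting-started guides above.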
Please refer to the following documents for more details on how to create a custom values file. + +* [Configure a custom values file for Scalar Helm Charts](./configure-custom-values-file.md) + +### Create a Secret resource for authentication of the container registry (Optional) + +If you use a Kubernetes cluster other than EKS or AKS, you need to create a Secret resource that includes the credentials and set the Secret name to `imagePullSecrets[].name` in your custom values file. Please refer to the following documents for more details on creating the Secret resource and setting it in your custom values file. + +* [Deploy containers on Kubernetes other than EKS from AWS Marketplace using Scalar Helm Charts](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AwsMarketplaceGuide.md#byol-deploy-containers-on-kubernetes-other-than-eks-from-aws-marketplace-using-scalar-helm-charts) +* [Deploy containers on Kubernetes other than AKS (Azure Kubernetes Service) from your private container registry using Scalar Helm Charts](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/AzureMarketplaceGuide.md#deploy-containers-on-kubernetes-other-than-aks-azure-kubernetes-service-from-your-private-container-registry-using-scalar-helm-charts) + +## Deploy Scalar products + +Please refer to the following documents for more details on how to deploy each product. + +* [ScalarDB Cluster](./how-to-deploy-scalardb-cluster.md) +* [ScalarDB Analytics with PostgreSQL](./how-to-deploy-scalardb-analytics-postgresql.md) +* [ScalarDL Ledger](./how-to-deploy-scalardl-ledger.md) +* [ScalarDL Auditor](./how-to-deploy-scalardl-auditor.md) +* [Scalar Admin for Kubernetes](./how-to-deploy-scalar-admin-for-kubernetes.md) +* [Scalar Manager](./how-to-deploy-scalar-manager.md) +* [[Deprecated] ScalarDB Server](./how-to-deploy-scalardb.md) +* [[Deprecated] ScalarDB GraphQL](./how-to-deploy-scalardb-graphql.md) diff --git a/docs/3.12/helm-charts/how-to-deploy-scalardb-analytics-postgresql.md b/docs/3.12/helm-charts/how-to-deploy-scalardb-analytics-postgresql.md new file mode 100644 index 00000000..b2454de4 --- /dev/null +++ b/docs/3.12/helm-charts/how-to-deploy-scalardb-analytics-postgresql.md @@ -0,0 +1,35 @@ +# How to deploy ScalarDB Analytics with PostgreSQL + +This document explains how to deploy ScalarDB Analytics with PostgreSQL by using Scalar Helm Charts. For details on the custom values file for ScalarDB Analytics with PostgreSQL, see [Configure a custom values file for ScalarDB Analytics with PostgreSQL](./configure-custom-values-scalardb-analytics-postgresql.md). + +## Prepare a secret resource + +You must create a secret resource `scalardb-analytics-postgresql-superuser-password` with the key `superuser-password` that includes a superuser password for PostgreSQL before you deploy ScalarDB Analytics with PostgreSQL. Scalar Helm Chart mounts this secret resource and sets the `POSTGRES_PASSWORD` environment variable to the value of the `superuser-password` key. 
+
+```console
+kubectl create secret generic scalardb-analytics-postgresql-superuser-password --from-literal=superuser-password= -n
+```
+
+## Deploy ScalarDB Analytics with PostgreSQL
+
+To deploy ScalarDB Analytics with PostgreSQL, run the following command, replacing the contents in the angle brackets as described:
+
+```console
+helm install scalar-labs/scalardb-analytics-postgresql -n -f / --version
+```
+
+## Upgrade a ScalarDB Analytics with PostgreSQL deployment
+
+To upgrade a ScalarDB Analytics with PostgreSQL deployment, run the following command, replacing the contents in the angle brackets as described:
+
+```console
+helm upgrade scalar-labs/scalardb-analytics-postgresql -n -f / --version
+```
+
+## Delete a ScalarDB Analytics with PostgreSQL deployment
+
+To delete a ScalarDB Analytics with PostgreSQL deployment, run the following command, replacing the contents in the angle brackets as described:
+
+```console
+helm uninstall -n
+```
diff --git a/docs/3.12/helm-charts/how-to-deploy-scalardb-cluster.md b/docs/3.12/helm-charts/how-to-deploy-scalardb-cluster.md
new file mode 100644
index 00000000..c45987a0
--- /dev/null
+++ b/docs/3.12/helm-charts/how-to-deploy-scalardb-cluster.md
@@ -0,0 +1,72 @@
+# How to deploy ScalarDB Cluster
+
+This document explains how to deploy ScalarDB Cluster by using Scalar Helm Charts. For details on the custom values file for ScalarDB Cluster, see [Configure a custom values file for ScalarDB Cluster](./configure-custom-values-scalardb-cluster.md).
+
+## Deploy ScalarDB Cluster
+
+```console
+helm install scalar-labs/scalardb-cluster -n -f / --version
+```
+
+## Upgrade a ScalarDB Cluster deployment
+
+```console
+helm upgrade scalar-labs/scalardb-cluster -n -f / --version
+```
+
+## Delete a ScalarDB Cluster deployment
+
+```console
+helm uninstall -n
+```
+
+## Deploy your client application on Kubernetes with `direct-kubernetes` mode
+
+If you use ScalarDB Cluster with `direct-kubernetes` mode, you must:
+
+1. Deploy your application pods on the same Kubernetes cluster as ScalarDB Cluster.
+2. Create three Kubernetes resources (`Role`, `RoleBinding`, and `ServiceAccount`).
+3. Mount the `ServiceAccount` on your application pods.
+
+This is necessary because the ScalarDB Cluster client library with `direct-kubernetes` mode uses the Kubernetes API from inside your application pods to get information about the ScalarDB Cluster pods.
+
+* Role
+
+  ```yaml
+  apiVersion: rbac.authorization.k8s.io/v1
+  kind: Role
+  metadata:
+    name: scalardb-cluster-client-role
+    namespace:
+  rules:
+    - apiGroups: [""]
+      resources: ["endpoints"]
+      verbs: ["get", "watch", "list"]
+  ```
+
+* RoleBinding
+
+  ```yaml
+  apiVersion: rbac.authorization.k8s.io/v1
+  kind: RoleBinding
+  metadata:
+    name: scalardb-cluster-client-rolebinding
+    namespace:
+  subjects:
+    - kind: ServiceAccount
+      name: scalardb-cluster-client-sa
+  roleRef:
+    kind: Role
+    name: scalardb-cluster-client-role
+    apiGroup: rbac.authorization.k8s.io
+  ```
+
+* ServiceAccount
+
+  ```yaml
+  apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: scalardb-cluster-client-sa
+    namespace:
+  ```
diff --git a/docs/3.12/helm-charts/how-to-deploy-scalardb-graphql.md b/docs/3.12/helm-charts/how-to-deploy-scalardb-graphql.md
new file mode 100644
index 00000000..89db3cae
--- /dev/null
+++ b/docs/3.12/helm-charts/how-to-deploy-scalardb-graphql.md
@@ -0,0 +1,41 @@
+# [Deprecated] How to deploy ScalarDB GraphQL
+
+{% capture notice--info %}
+**Note**
+
+ScalarDB GraphQL Server is now deprecated.
Please use [ScalarDB Cluster](./how-to-deploy-scalardb-cluster.md) instead. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +This document explains how to deploy ScalarDB GraphQL using Scalar Helm Charts. You must prepare your custom values file. Please refer to the following document for more details on the custom values file for ScalarDB GraphQL. + +* [[Deprecated] Configure a custom values file for ScalarDB GraphQL](./configure-custom-values-scalardb-graphql.md) + +## Deploy ScalarDB Server (recommended option) + +When you deploy ScalarDB GraphQL, it is recommended to deploy ScalarDB Server between ScalarDB GraphQL and backend databases as follows. + +``` +[Client] ---> [ScalarDB GraphQL] ---> [ScalarDB Server] ---> [Backend databases] +``` + +Please deploy ScalarDB Server before you deploy ScalarDB GraphQL according to the document [How to deploy ScalarDB Server](./how-to-deploy-scalardb.md). + +## Deploy ScalarDB GraphQL + +```console +helm install scalar-labs/scalardb-graphql -n -f / --version +``` + +## Upgrade the deployment of ScalarDB GraphQL + +```console +helm upgrade scalar-labs/scalardb-graphql -n -f / --version +``` + +## Delete the deployment of ScalarDB GraphQL + +```console +helm uninstall -n +``` diff --git a/docs/3.12/helm-charts/how-to-deploy-scalardb.md b/docs/3.12/helm-charts/how-to-deploy-scalardb.md new file mode 100644 index 00000000..8482477d --- /dev/null +++ b/docs/3.12/helm-charts/how-to-deploy-scalardb.md @@ -0,0 +1,31 @@ +# [Deprecated] How to deploy ScalarDB Server + +{% capture notice--info %} +**Note** + +ScalarDB Server is now deprecated. Please use [ScalarDB Cluster](./how-to-deploy-scalardb-cluster.md) instead. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +This document explains how to deploy ScalarDB Server using Scalar Helm Charts. You must prepare your custom values file. Please refer to the following document for more details on the custom values file for ScalarDB Server. + +* [[Deprecated] Configure a custom values file for ScalarDB Server](./configure-custom-values-scalardb.md) + +## Deploy ScalarDB Server + +```console +helm install scalar-labs/scalardb -n -f / --version +``` + +## Upgrade the deployment of ScalarDB Server + +```console +helm upgrade scalar-labs/scalardb -n -f / --version +``` + +## Delete the deployment of ScalarDB Server + +```console +helm uninstall -n +``` diff --git a/docs/3.12/helm-charts/how-to-deploy-scalardl-auditor.md b/docs/3.12/helm-charts/how-to-deploy-scalardl-auditor.md new file mode 100644 index 00000000..085d0f09 --- /dev/null +++ b/docs/3.12/helm-charts/how-to-deploy-scalardl-auditor.md @@ -0,0 +1,38 @@ +# How to deploy ScalarDL Auditor + +This document explains how to deploy ScalarDL Auditor using Scalar Helm Charts. You must prepare your custom values file. Please refer to the following document for more details on the custom values file for ScalarDL Auditor and ScalarDL Schema Loader. + +* [Configure a custom values file for ScalarDL Auditor](./configure-custom-values-scalardl-auditor.md) +* [Configure a custom values file for ScalarDL Schema Loader](./configure-custom-values-scalardl-schema-loader.md) + +## Prepare a private key file and a certificate file + +When you deploy ScalarDL Auditor, you must create a Secrete resource to mount the private key file and the certificate file on the ScalarDL Auditor pods. + +For more details on how to mount the key and certificate files on the ScalarDL pods, refer to [Mount key and certificate files on a pod in ScalarDL Helm Charts](./mount-files-or-volumes-on-scalar-pods.md#mount-key-and-certificate-files-on-a-pod-in-scalardl-helm-charts). + +## Create schemas for ScalarDL Auditor (Deploy ScalarDL Schema Loader) + +Before you deploy ScalarDL Auditor, you must create schemas for ScalarDL Auditor on the backend database. + +```console +helm install scalar-labs/schema-loading -n -f / --version +``` + +## Deploy ScalarDL Auditor + +```console +helm install scalar-labs/scalardl-audit -n -f / --version +``` + +## Upgrade the deployment of ScalarDL Auditor + +```console +helm upgrade scalar-labs/scalardl-audit -n -f / --version +``` + +## Delete the deployment of ScalarDL Auditor and ScalarDL Schema Loader + +```console +helm uninstall -n +``` diff --git a/docs/3.12/helm-charts/how-to-deploy-scalardl-ledger.md b/docs/3.12/helm-charts/how-to-deploy-scalardl-ledger.md new file mode 100644 index 00000000..0ae71db7 --- /dev/null +++ b/docs/3.12/helm-charts/how-to-deploy-scalardl-ledger.md @@ -0,0 +1,40 @@ +# How to deploy ScalarDL Ledger + +This document explains how to deploy ScalarDL Ledger using Scalar Helm Charts. You must prepare your custom values file. Please refer to the following document for more details on the custom values file for ScalarDL Ledger and ScalarDL Schema Loader. 
+ +* [Configure a custom values file for ScalarDL Ledger](./configure-custom-values-scalardl-ledger.md) +* [Configure a custom values file for ScalarDL Schema Loader](./configure-custom-values-scalardl-schema-loader.md) + +## Prepare a private key file (optional / it is necessary if you use ScalarDL Auditor) + +If you use the [asset proofs](https://github.com/scalar-labs/scalardl/blob/master/docs/how-to-use-proof.md) of ScalarDL Ledger, you must create a Secrete resource to mount the private key file on the ScalarDL Ledger pods. If you use ScalarDL Auditor, asset proof is necessary. + +Please refer to the following document for more details on how to mount the key/certificate files on the ScalarDL pods. + +* [Mount key and certificate files on a pod in ScalarDL Helm Charts](./mount-files-or-volumes-on-scalar-pods.md#mount-key-and-certificate-files-on-a-pod-in-scalardl-helm-charts) + +## Create schemas for ScalarDL Ledger (Deploy ScalarDL Schema Loader) + +Before you deploy ScalarDL Ledger, you must create schemas for ScalarDL Ledger on the backend database. + +```console +helm install scalar-labs/schema-loading -n -f / --version +``` + +## Deploy ScalarDL Ledger + +```console +helm install scalar-labs/scalardl -n -f / --version +``` + +## Upgrade the deployment of ScalarDL Ledger + +```console +helm upgrade scalar-labs/scalardl -n -f / --version +``` + +## Delete the deployment of ScalarDL Ledger and ScalarDL Schema Loader + +```console +helm uninstall -n +``` diff --git a/docs/3.12/helm-charts/mount-files-or-volumes-on-scalar-pods.md b/docs/3.12/helm-charts/mount-files-or-volumes-on-scalar-pods.md new file mode 100644 index 00000000..a89610ca --- /dev/null +++ b/docs/3.12/helm-charts/mount-files-or-volumes-on-scalar-pods.md @@ -0,0 +1,133 @@ +# Mount any files or volumes on Scalar product pods + +You can mount any files or volumes on Scalar product pods when you use ScalarDB Server, ScalarDB Cluster, ScalarDB Analytics with PostgreSQL, or ScalarDL Helm Charts (ScalarDL Ledger and ScalarDL Auditor). + +## Mount key and certificate files on a pod in ScalarDL Helm Charts + +You must mount the key and certificate files to run ScalarDL Auditor. + +* Configuration example + * ScalarDL Ledger + + ```yaml + ledger: + ledgerProperties: | + ... + scalar.dl.ledger.proof.enabled=true + scalar.dl.ledger.auditor.enabled=true + scalar.dl.ledger.proof.private_key_path=/keys/private-key + ``` + + * ScalarDL Auditor + + ```yaml + auditor: + auditorProperties: | + ... + scalar.dl.auditor.private_key_path=/keys/private-key + scalar.dl.auditor.cert_path=/keys/certificate + ``` + +In this example, you need to mount a **private-key** and a **certificate** file under the `/keys` directory in the container. And, you need to mount files named `private-key` and `certificate`. You can use `extraVolumes` and `extraVolumeMounts` to mount these files. + +1. Set `extraVolumes` and `extraVolumeMounts` in the custom values file using the same syntax of Kubernetes manifest. You need to specify the directory name to the key `mountPath`. + * Example + * ScalarDL Ledger + + ```yaml + ledger: + extraVolumes: + - name: ledger-keys + secret: + secretName: ledger-keys + extraVolumeMounts: + - name: ledger-keys + mountPath: /keys + readOnly: true + ``` + + * ScalarDL Auditor + + ```yaml + auditor: + extraVolumes: + - name: auditor-keys + secret: + secretName: auditor-keys + extraVolumeMounts: + - name: auditor-keys + mountPath: /keys + readOnly: true + ``` + +1. Create a `Secret` resource that includes key and certificate files. 
+ + You need to specify the file name as keys of `Secret`. + + * Example + * ScalarDL Ledger + + ```console + kubectl create secret generic ledger-keys \ + --from-file=private-key=./ledger-key.pem + ``` + + * ScalarDL Auditor + + ```console + kubectl create secret generic auditor-keys \ + --from-file=private-key=./auditor-key.pem \ + --from-file=certificate=./auditor-cert.pem + ``` + +1. Deploy Scalar products with the above custom values file. + + After deploying Scalar products, key and certificate files are mounted under the `/keys` directory as follows. + + * Example + * ScalarDL Ledger + + ```console + $ ls -l /keys/ + total 0 + lrwxrwxrwx 1 root root 18 Jun 27 03:12 private-key -> ..data/private-key + ``` + + * ScalarDL Auditor + + ```console + $ ls -l /keys/ + total 0 + lrwxrwxrwx 1 root root 18 Jun 27 03:16 certificate -> ..data/certificate + lrwxrwxrwx 1 root root 18 Jun 27 03:16 private-key -> ..data/private-key + ``` + +## Mount emptyDir to get a heap dump file + +You can mount emptyDir to Scalar product pods by using the following keys in your custom values file. For example, you can use this volume to get a heap dump of Scalar products. + +* Keys + * `scalardb.extraVolumes` / `scalardb.extraVolumeMounts` (ScalarDB Server) + * `scalardbCluster.extraVolumes` / `scalardbCluster.extraVolumeMounts` (ScalarDB Cluster) + * `scalardbAnalyticsPostgreSQL.extraVolumes` / `scalardbAnalyticsPostgreSQL.extraVolumeMounts` (ScalarDB Analytics with PostgreSQL) + * `ledger.extraVolumes` / `ledger.extraVolumeMounts` (ScalarDL Ledger) + * `auditor.extraVolumes` / `auditor.extraVolumeMounts` (ScalarDL Auditor) + +* Example (ScalarDB Server) + + ```yaml + scalardb: + extraVolumes: + - name: heap-dump + emptyDir: {} + extraVolumeMounts: + - name: heap-dump + mountPath: /dump + ``` + +In this example, you can see the mounted volume in the ScalarDB Server pod as follows. + +```console +$ ls -ld /dump +drwxrwxrwx 2 root root 4096 Feb 6 07:43 /dump +``` diff --git a/docs/3.12/helm-charts/use-secret-for-credentials.md b/docs/3.12/helm-charts/use-secret-for-credentials.md new file mode 100644 index 00000000..23ce8911 --- /dev/null +++ b/docs/3.12/helm-charts/use-secret-for-credentials.md @@ -0,0 +1,203 @@ +# How to use Secret resources to pass credentials as environment variables into the properties file + +You can pass credentials like **username** or **password** as environment variables via a `Secret` resource in Kubernetes. The docker images for previous versions of Scalar products use the `dockerize` command for templating properties files. The docker images for the latest versions of Scalar products get values directly from environment variables. + +Note: You cannot use the following environment variable names in your custom values file since these are used in the Scalar Helm Chart internal. 
+ +```console +HELM_SCALAR_DB_CONTACT_POINTS +HELM_SCALAR_DB_CONTACT_PORT +HELM_SCALAR_DB_USERNAME +HELM_SCALAR_DB_PASSWORD +HELM_SCALAR_DB_STORAGE +HELM_SCALAR_DL_LEDGER_PROOF_ENABLED +HELM_SCALAR_DL_LEDGER_AUDITOR_ENABLED +HELM_SCALAR_DL_LEDGER_PROOF_PRIVATE_KEY_PATH +HELM_SCALAR_DL_AUDITOR_SERVER_PORT +HELM_SCALAR_DL_AUDITOR_SERVER_PRIVILEGED_PORT +HELM_SCALAR_DL_AUDITOR_SERVER_ADMIN_PORT +HELM_SCALAR_DL_AUDITOR_LEDGER_HOST +HELM_SCALAR_DL_AUDITOR_CERT_HOLDER_ID +HELM_SCALAR_DL_AUDITOR_CERT_VERSION +HELM_SCALAR_DL_AUDITOR_CERT_PATH +HELM_SCALAR_DL_AUDITOR_PRIVATE_KEY_PATH +SCALAR_DB_LOG_LEVEL +SCALAR_DL_LEDGER_LOG_LEVEL +SCALAR_DL_AUDITOR_LOG_LEVEL +SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAMESPACE_NAME +SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAME +``` + +1. Set environment variable name to the properties configuration in the custom values file. + * Example + * ScalarDB Server + * ScalarDB Server 3.7 or earlier (Go template syntax) + + {% raw %} + ```yaml + scalardb: + databaseProperties: | + ... + scalar.db.username={{ default .Env.SCALAR_DB_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DB_PASSWORD "" }} + ... + ``` + {% endraw %} + + * ScalarDB Server 3.8 or later (Apache Commons Text syntax) + + ```yaml + scalardb: + databaseProperties: | + ... + scalar.db.username=${env:SCALAR_DB_USERNAME} + scalar.db.password=${env:SCALAR_DB_PASSWORD} + ... + ``` + + * ScalarDB Cluster + + ```yaml + scalardbCluster: + scalardbClusterNodeProperties: | + ... + scalar.db.username=${env:SCALAR_DB_USERNAME} + scalar.db.password=${env:SCALAR_DB_PASSWORD} + ... + ``` + + * ScalarDB Analytics with PostgreSQL + ```yaml + scalardbAnalyticsPostgreSQL: + databaseProperties: | + ... + scalar.db.username=${env:SCALAR_DB_USERNAME} + scalar.db.password=${env:SCALAR_DB_PASSWORD} + ... + ``` + * ScalarDL Ledger (Go template syntax) + + {% raw %} + ```yaml + ledger: + ledgerProperties: | + ... + scalar.db.username={{ default .Env.SCALAR_DB_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DB_PASSWORD "" }} + ... + ``` + {% endraw %} + + * ScalarDL Auditor (Go template syntax) + + {% raw %} + ```yaml + auditor: + auditorProperties: | + ... + scalar.db.username={{ default .Env.SCALAR_DB_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DB_PASSWORD "" }} + ... + ``` + {% endraw %} + + * ScalarDL Schema Loader (Go template syntax) + + {% raw %} + ```yaml + schemaLoading: + databaseProperties: | + ... + scalar.db.username={{ default .Env.SCALAR_DB_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DB_PASSWORD "" }} + ... + ``` + {% endraw %} + +1. Create a `Secret` resource that includes credentials. + You need to specify the environment variable name as keys of the `Secret`. + * Example + + ```console + kubectl create secret generic scalardb-credentials-secret \ + --from-literal=SCALAR_DB_USERNAME=postgres \ + --from-literal=SCALAR_DB_PASSWORD=postgres + ``` + +1. Set the `Secret` name to the following keys in the custom values file. 
+ * Keys + * `scalardb.secretName` (ScalarDB Server) + * `scalardbCluster.secretName` (ScalarDB Cluster) + * `scalardbAnalyticsPostgreSQL.secretName` (ScalarDB Analytics with PostgreSQL) + * `ledger.secretName` (ScalarDL Ledger) + * `auditor.secretName` (ScalarDL Auditor) + * `schemaLoading.secretName` (ScalarDL Schema Loader) + * Example + * ScalarDB Server + + ```yaml + scalardb: + secretName: "scalardb-credentials-secret" + ``` + + * ScalarDB Cluster + + ```yaml + scalardbCluster: + secretName: "scalardb-cluster-credentials-secret" + ``` + + * ScalarDB Analytics with PostgreSQL + ```yaml + scalardbAnalyticsPostgreSQL: + secretName: scalardb-analytics-postgresql-credentials-secret + ``` + * ScalarDL Ledger + + ```yaml + ledger: + secretName: "ledger-credentials-secret" + ``` + + * ScalarDL Auditor + + ```yaml + auditor: + secretName: "auditor-credentials-secret" + ``` + + * ScalarDL Schema Loader + + ```yaml + schemaLoading: + secretName: "schema-loader-ledger-credentials-secret" + ``` + +1. Deploy Scalar products with the above custom values file. + + After deploying Scalar products, the Go template strings (environment variables) are replaced by the values of the `Secret`. + + * Example + * Custom values file + + {% raw %} + ```yaml + scalardb: + databaseProperties: | + scalar.db.contact_points=jdbc:postgresql://postgresql-scalardb.default.svc.cluster.local:5432/postgres + scalar.db.username={{ default .Env.SCALAR_DB_USERNAME "" }} + scalar.db.password={{ default .Env.SCALAR_DB_PASSWORD "" }} + scalar.db.storage=jdbc + ``` + {% endraw %} + + * Properties file in containers + + ```properties + scalar.db.contact_points=jdbc:postgresql://postgresql-scalardb.default.svc.cluster.local:5432/postgres + scalar.db.username=postgres + scalar.db.password=postgres + scalar.db.storage=jdbc + ``` + + If you use Apache Commons Text syntax, Scalar products get values directly from environment variables. diff --git a/docs/3.12/images/data_model.png b/docs/3.12/images/data_model.png new file mode 100644 index 00000000..15a0e4d4 Binary files /dev/null and b/docs/3.12/images/data_model.png differ diff --git a/docs/3.12/images/software_stack.png b/docs/3.12/images/software_stack.png new file mode 100644 index 00000000..75fba6e6 Binary files /dev/null and b/docs/3.12/images/software_stack.png differ diff --git a/docs/3.12/images/two_phase_commit_load_balancing.png b/docs/3.12/images/two_phase_commit_load_balancing.png new file mode 100644 index 00000000..5cdc26f0 Binary files /dev/null and b/docs/3.12/images/two_phase_commit_load_balancing.png differ diff --git a/docs/3.12/images/two_phase_commit_sequence_diagram.png b/docs/3.12/images/two_phase_commit_sequence_diagram.png new file mode 100644 index 00000000..116ef635 Binary files /dev/null and b/docs/3.12/images/two_phase_commit_sequence_diagram.png differ diff --git a/docs/3.12/index.md b/docs/3.12/index.md new file mode 100644 index 00000000..a0c5f239 --- /dev/null +++ b/docs/3.12/index.md @@ -0,0 +1,83 @@ +# ScalarDB + +[![CI](https://github.com/scalar-labs/scalardb/actions/workflows/ci.yaml/badge.svg?branch=master)](https://github.com/scalar-labs/scalardb/actions/workflows/ci.yaml) + +ScalarDB is a universal transaction manager that achieves: +- database/storage-agnostic ACID transactions in a scalable manner even if an underlying database or storage is not ACID-compliant. +- multi-storage/database/service ACID transactions that can span multiple (possibly different) databases, storages, and services. 
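As a quick illustration of the Java API, the following is a minimal sketch of a single transaction. It assumes a `database.properties` file that points at your backend and a `sample.customers` table created beforehand (for example, with Schema Loader); the namespace, table, and column names are placeholders.

```java
import com.scalar.db.api.DistributedTransaction;
import com.scalar.db.api.DistributedTransactionManager;
import com.scalar.db.api.Put;
import com.scalar.db.io.Key;
import com.scalar.db.service.TransactionFactory;

public class QuickStart {
  public static void main(String[] args) throws Exception {
    // Load the backend configuration (storage, contact points, credentials, etc.).
    TransactionFactory factory = TransactionFactory.create("database.properties");
    DistributedTransactionManager manager = factory.getTransactionManager();

    DistributedTransaction tx = manager.start();
    try {
      // Write a record inside the transaction.
      tx.put(
          Put.newBuilder()
              .namespace("sample")
              .table("customers")
              .partitionKey(Key.ofInt("customer_id", 1))
              .intValue("credit_limit", 10000)
              .build());
      tx.commit();
    } catch (Exception e) {
      // Roll back on any failure so the underlying databases stay consistent.
      tx.abort();
      throw e;
    } finally {
      manager.close();
    }
  }
}
```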
+ +## Install +The library is available on [maven central repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb). +You can install it in your application using your build tool such as Gradle and Maven. + +To add a dependency on ScalarDB using Gradle, use the following: +```gradle +dependencies { + implementation 'com.scalar-labs:scalardb:3.12.0' +} +``` + +To add a dependency using Maven: +```xml + + com.scalar-labs + scalardb + 3.12.0 + +``` + +## Docs +* [Getting started](getting-started-with-scalardb.md) +* [Java API Guide](api-guide.md) +* [ScalarDB Samples](https://github.com/scalar-labs/scalardb-samples) +* [ScalarDB Server](scalardb-server.md) +* [Multi-storage Transactions](multi-storage-transactions.md) +* [Two-phase Commit Transactions](two-phase-commit-transactions.md) +* [Design document](design.md) +* [Schema Loader](schema-loader.md) +* [Requirements and Recommendations for the Underlying Databases of ScalarDB](requirements.md) +* [How to Back up and Restore](backup-restore.md) +* [ScalarDB supported databases](scalardb-supported-databases.md) +* [Configurations](configurations.md) +* [Storage abstraction](storage-abstraction.md) +* Slides + * [Making Cassandra more capable, faster, and more reliable](https://speakerdeck.com/scalar/making-cassandra-more-capable-faster-and-more-reliable-at-apachecon-at-home-2020) at ApacheCon@Home 2020 + * [Scalar DB: A library that makes non-ACID databases ACID-compliant](https://speakerdeck.com/scalar/scalar-db-a-library-that-makes-non-acid-databases-acid-compliant) at Database Lounge Tokyo #6 2020 + * [Transaction Management on Cassandra](https://speakerdeck.com/scalar/transaction-management-on-cassandra) at Next Generation Cassandra Conference / ApacheCon NA 2019 +* Javadoc + * [scalardb](https://javadoc.io/doc/com.scalar-labs/scalardb/latest/index.html) - ScalarDB: A universal transaction manager that achieves database-agnostic transactions and distributed transactions that span multiple databases + * [scalardb-rpc](https://javadoc.io/doc/com.scalar-labs/scalardb-rpc/latest/index.html) - ScalarDB RPC libraries + * [scalardb-server](https://javadoc.io/doc/com.scalar-labs/scalardb-server/latest/index.html) - ScalarDB Server: A gRPC interface of ScalarDB + * [scalardb-schema-loader](https://javadoc.io/doc/com.scalar-labs/scalardb-schema-loader/latest/index.html) - ScalarDB Schema Loader: A tool for schema creation and schema deletion in ScalarDB +* [Jepsen tests](https://github.com/scalar-labs/scalar-jepsen) +* [TLA+](https://github.com/scalar-labs/scalardb/tree/master/tla+/consensus-commit) + +## Contributing +This library is mainly maintained by the Scalar Engineering Team, but of course we appreciate any help. + +* For asking questions, finding answers and helping other users, please go to [stackoverflow](https://stackoverflow.com/) and use [scalardb](https://stackoverflow.com/questions/tagged/scalardb) tag. +* For filing bugs, suggesting improvements, or requesting new features, help us out by opening an issue. + +Here are the contributors we are especially thankful for: +- [Toshihiro Suzuki](https://github.com/brfrn169) - created [Phoenix adapter](https://github.com/scalar-labs/scalardb-phoenix) for ScalarDB +- [Yonezawa-T2](https://github.com/Yonezawa-T2) - reported bugs around Serializable and proposed a new Serializable strategy (now named Extra-Read) + +## Development + +### Pre-commit hook + +This project uses [pre-commit](https://pre-commit.com/) to automate code format and so on as much as possible. 
If you're interested in the development of ScalarDB, please [install pre-commit](https://pre-commit.com/#installation) and the git hook script as follows. + +``` +$ ls -a .pre-commit-config.yaml +.pre-commit-config.yaml +$ pre-commit install +``` + +The code formatter is automatically executed when committing files. A commit will fail and be formatted by the formatter when any invalid code format is detected. Try to commit the change again. + +## License +ScalarDB is dual-licensed under both the Apache 2.0 License (found in the LICENSE file in the root directory) and a commercial license. +You may select, at your option, one of the above-listed licenses. +The commercial license includes several enterprise-grade features such as management tools and declarative query interfaces like GraphQL and SQL interfaces. +Regarding the commercial license, please [contact us](https://scalar-labs.com/contact_us/) for more information. diff --git a/docs/3.12/multi-storage-transactions.md b/docs/3.12/multi-storage-transactions.md new file mode 100644 index 00000000..018c808b --- /dev/null +++ b/docs/3.12/multi-storage-transactions.md @@ -0,0 +1,60 @@ +# Multi-Storage Transactions + +ScalarDB transactions can span multiple storages or databases while maintaining ACID compliance by using a feature called *multi-storage transactions*. + +This page explains how multi-storage transactions work and how to configure the feature in ScalarDB. + +## How multi-storage transactions work in ScalarDB + +In ScalarDB, the `multi-storage` implementation holds multiple storage instances and has mappings from a namespace name to a proper storage instance. When an operation is executed, the multi-storage transactions feature chooses a proper storage instance from the specified namespace by using the namespace-storage mapping and uses that storage instance. + +## How to configure ScalarDB to support multi-storage transactions + +To enable multi-storage transactions, you need to specify `consensus-commit` as the value for `scalar.db.transaction_manager`, `multi-storage` as the value for `scalar.db.storage`, and configure your databases in the ScalarDB properties file. + +The following is an example of configurations for multi-storage transactions: + +```properties +# Consensus Commit is required to support multi-storage transactions. +scalar.db.transaction_manager=consensus-commit + +# Multi-storage implementation is used for Consensus Commit. +scalar.db.storage=multi-storage + +# Define storage names by using a comma-separated format. +# In this case, "cassandra" and "mysql" are used. +scalar.db.multi_storage.storages=cassandra,mysql + +# Define the "cassandra" storage. +# When setting storage properties, such as `storage`, `contact_points`, `username`, and `password`, for multi-storage transactions, the format is `scalar.db.multi_storage.storages..`. +# For example, to configure the `scalar.db.contact_points` property for Cassandra, specify `scalar.db.multi_storage.storages.cassandra.contact_point`. +scalar.db.multi_storage.storages.cassandra.storage=cassandra +scalar.db.multi_storage.storages.cassandra.contact_points=localhost +scalar.db.multi_storage.storages.cassandra.username=cassandra +scalar.db.multi_storage.storages.cassandra.password=cassandra + +# Define the "mysql" storage. +# When defining JDBC-specific configurations for multi-storage transactions, you can follow a similar format of `scalar.db.multi_storage.storages..`. 
+# For example, to configure the `scalar.db.jdbc.connection_pool.min_idle` property for MySQL, specify `scalar.db.multi_storage.storages.mysql.jdbc.connection_pool.min_idle`. +scalar.db.multi_storage.storages.mysql.storage=jdbc +scalar.db.multi_storage.storages.mysql.contact_points=jdbc:mysql://localhost:3306/ +scalar.db.multi_storage.storages.mysql.username=root +scalar.db.multi_storage.storages.mysql.password=mysql +# Define the JDBC-specific configurations for the "mysql" storage. +scalar.db.multi_storage.storages.mysql.jdbc.connection_pool.min_idle=5 +scalar.db.multi_storage.storages.mysql.jdbc.connection_pool.max_idle=10 +scalar.db.multi_storage.storages.mysql.jdbc.connection_pool.max_total=25 + +# Define namespace mapping from a namespace name to a storage. +# The format is ":,...". +scalar.db.multi_storage.namespace_mapping=user:cassandra,coordinator:mysql + +# Define the default storage that's used if a specified table doesn't have any mapping. +scalar.db.multi_storage.default_storage=cassandra +``` + +For additional configurations, see [ScalarDB Configurations](configurations.md). + +## Hands-on tutorial + +For a hands-on tutorial, see [Create a Sample Application That Supports Multi-Storage Transactions](https://github.com/scalar-labs/scalardb-samples/tree/main/multi-storage-transaction-sample). diff --git a/docs/3.12/requirements.md b/docs/3.12/requirements.md new file mode 100644 index 00000000..98d86715 --- /dev/null +++ b/docs/3.12/requirements.md @@ -0,0 +1,51 @@ +# Requirements and Recommendations for the Underlying Databases of ScalarDB + +This document explains the requirements and recommendations in the underlying databases of ScalarDB to make ScalarDB applications work correctly. + +## Common requirements + +This section describes common requirements for the underlying databases when using ScalarDB. + +### Privileges to access the underlying databases + +ScalarDB operates the underlying databases not only for CRUD operations but also for performing operations like creating or altering schemas, tables, or indexes. Thus, ScalarDB basically requires a fully privileged account to access the underlying databases. + +## Cassandra or Cassandra-compatible database requirements + +The following are requirements to make ScalarDB on Cassandra or Cassandra-compatible databases work properly and for storage operations with `LINEARIZABLE` to provide linearizability and for transaction operations with `SERIALIZABLE` to provide strict serializability. + +### Ensure durability in Cassandra + +In **cassandra.yaml**, you must change `commitlog_sync` from the default `periodic` to `batch` or `group` to ensure durability in Cassandra. + +ScalarDB provides only the atomicity and isolation properties of ACID and requests the underlying databases to provide durability. Although you can specify `periodic`, we do not recommend doing so unless you know exactly what you are doing. + +### Confirm that the Cassandra-compatible database supports lightweight transactions (LWTs) + +You must use a Cassandra-compatible database that supports LWTs. + +ScalarDB does not work on some Cassandra-compatible databases that do not support LWTs, such as [Amazon Keyspaces](https://aws.amazon.com/keyspaces/). This is because the Consensus Commit transaction manager relies on the linearizable operations of underlying databases to make transactions serializable. + +## CosmosDB database requirements + +In your Azure CosmosDB account, you must set the **default consistency level** to **Strong**. 
+ +Consensus Commit, the ScalarDB transaction protocol, requires linearizable reads. By setting the **default consistency level** to **Strong**, CosmosDB can guarantee linearizability. + +For instructions on how to configure this setting, see the official documentation at [Configure the default consistency level](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/how-to-manage-consistency#configure-the-default-consistency-level). + +## JDBC database recommendations + +In ScalarDB on JDBC databases, you can't choose a consistency level (`LINEARIZABLE`, `SEQUENTIAL` or `EVENTUAL`) in your code by using the `Operation.withConsistency()` method. In addition, the consistency level depends on the setup of your JDBC database. + +For example, if you have asynchronous read replicas in your setup and perform read operations against them, the consistency will be eventual because you can read stale data from the read replicas. On the other hand, if you perform all operations against a single master instance, the consistency will be linearizable. + +With this in mind, you must perform all operations or transactions against a single master instance so that you can achieve linearizability and avoid worrying about consistency issues in your application. In other words, ScalarDB does not support read replicas. + +{% capture notice--info %} +**Note** + +You can still use a read replica as a backup and standby even when following this guideline. +{% endcapture %} + +
{{ notice--info | markdownify }}
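As a minimal illustration of this guideline, a ScalarDB on JDBC configuration should reference only the primary (writer) endpoint. The hostname and credentials below are placeholders:

```properties
# Hypothetical example: point ScalarDB only at the primary (writer) endpoint.
scalar.db.storage=jdbc
scalar.db.contact_points=jdbc:mysql://primary.example.com:3306/
scalar.db.username=scalardb
scalar.db.password=changeme
# Do not use a reader or replica endpoint here, because reads from
# asynchronous replicas can return stale data and break linearizability.
```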
diff --git a/docs/3.12/scalar-kubernetes/AccessScalarProducts.md b/docs/3.12/scalar-kubernetes/AccessScalarProducts.md new file mode 100644 index 00000000..b5f3d13e --- /dev/null +++ b/docs/3.12/scalar-kubernetes/AccessScalarProducts.md @@ -0,0 +1,203 @@ +# Make ScalarDB or ScalarDL deployed in a Kubernetes cluster environment available from applications + +This document explains how to make ScalarDB or ScalarDL deployed in a Kubernetes cluster environment available from applications. To make ScalarDB or ScalarDL available from applications, you can use Scalar Envoy via a Kubernetes service resource named `-envoy`. You can use `-envoy` in several ways, such as: + +* Directly from inside the same Kubernetes cluster as ScalarDB or ScalarDL. +* Via a load balancer from outside the Kubernetes cluster. +* From a bastion server by using the `kubectl port-forward` command (for testing purposes only). + +The resource name `-envoy` is decided based on the helm release name. You can see the helm release name by running the `helm list` command. + +```console +$ helm list -n ns-scalar +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +scalardb ns-scalar 1 2023-02-09 19:31:40.527130674 +0900 JST deployed scalardb-2.5.0 3.8.0 +scalardl-auditor ns-scalar 1 2023-02-09 19:32:03.008986045 +0900 JST deployed scalardl-audit-2.5.1 3.7.1 +scalardl-ledger ns-scalar 1 2023-02-09 19:31:53.459548418 +0900 JST deployed scalardl-4.5.1 3.7.1 +``` + +You can also see the envoy service name `-envoy` by running the `kubectl get service` command. + +```console +$ kubectl get service -n ns-scalar +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +scalardb-envoy LoadBalancer 10.99.245.143 60051:31110/TCP 2m2s +scalardb-envoy-metrics ClusterIP 10.104.56.87 9001/TCP 2m2s +scalardb-headless ClusterIP None 60051/TCP 2m2s +scalardb-metrics ClusterIP 10.111.213.194 8080/TCP 2m2s +scalardl-auditor-envoy LoadBalancer 10.111.141.43 40051:31553/TCP,40052:31171/TCP 99s +scalardl-auditor-envoy-metrics ClusterIP 10.104.245.188 9001/TCP 99s +scalardl-auditor-headless ClusterIP None 40051/TCP,40053/TCP,40052/TCP 99s +scalardl-auditor-metrics ClusterIP 10.105.119.158 8080/TCP 99s +scalardl-ledger-envoy LoadBalancer 10.96.239.167 50051:32714/TCP,50052:30857/TCP 109s +scalardl-ledger-envoy-metrics ClusterIP 10.97.204.18 9001/TCP 109s +scalardl-ledger-headless ClusterIP None 50051/TCP,50053/TCP,50052/TCP 109s +scalardl-ledger-metrics ClusterIP 10.104.216.189 8080/TCP 109s +``` + +## Run application (client) requests to ScalarDB or ScalarDL via service resources directly from inside the same Kubernetes cluster + +If you deploy your application (client) in the same Kubernetes cluster as ScalarDB or ScalarDL (for example, if you deploy your application [client] on another node group or pool in the same Kubernetes cluster), the application can access ScalarDB or ScalarDL by using Kubernetes service resources. The format of the service resource name (FQDN) is `-envoy..svc.cluster.local`. 
+ +The following are examples of ScalarDB and ScalarDL deployments on the `ns-scalar` namespace: + +* **ScalarDB Server** + + ```console + scalardb-envoy.ns-scalar.svc.cluster.local + ``` + +* **ScalarDL Ledger** + + ```console + scalardl-ledger-envoy.ns-scalar.svc.cluster.local + ``` + +* **ScalarDL Auditor** + + ```console + scalardl-auditor-envoy.ns-scalar.svc.cluster.local + ``` + +When using the Kubernetes service resource, you must set the above FQDN in the properties file for the application (client) as follows: + +* **Client properties file for ScalarDB Server** + + ```properties + scalar.db.contact_points=-envoy..svc.cluster.local + scalar.db.contact_port=60051 + scalar.db.storage=grpc + scalar.db.transaction_manager=grpc + ``` + +* **Client properties file for ScalarDL Ledger** + + ```properties + scalar.dl.client.server.host=-envoy..svc.cluster.local + scalar.dl.ledger.server.port=50051 + scalar.dl.ledger.server.privileged_port=50052 + ``` + +* **Client properties file for ScalarDL Ledger with ScalarDL Auditor mode enabled** + + ```properties + # Ledger + scalar.dl.client.server.host=-envoy..svc.cluster.local + scalar.dl.ledger.server.port=50051 + scalar.dl.ledger.server.privileged_port=50052 + + # Auditor + scalar.dl.client.auditor.enabled=true + scalar.dl.client.auditor.host=-envoy..svc.cluster.local + scalar.dl.auditor.server.port=40051 + scalar.dl.auditor.server.privileged_port=40052 + ``` + +## Run application (client) requests to ScalarDB or ScalarDL via load balancers from outside the Kubernetes cluster + +If you deploy your application (client) in an environment outside the Kubernetes cluster for ScalarDB or ScalarDL (for example, if you deploy your application [client] on another Kubernetes cluster, container platform, or server), the application can access ScalarDB or ScalarDL by using a load balancer that each cloud service provides. + +You can create a load balancer by setting `envoy.service.type` to `LoadBalancer` in your custom values file. After configuring the custom values file, you can use Scalar Envoy through a Kubernetes service resource by using the load balancer. You can also set the load balancer configurations by using annotations. + +For more details on how to configure your custom values file, see [Service configurations](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-envoy.md#service-configurations). + +When using a load balancer, you must set the FQDN or IP address of the load balancer in the properties file for the application (client) as follows. + +* **Client properties file for ScalarDB Server** + + ```properties + scalar.db.contact_points= + scalar.db.contact_port=60051 + scalar.db.storage=grpc + scalar.db.transaction_manager=grpc + ``` + +* **Client properties file for ScalarDL Ledger** + + ```properties + scalar.dl.client.server.host= + scalar.dl.ledger.server.port=50051 + scalar.dl.ledger.server.privileged_port=50052 + ``` + +* **Client properties file for ScalarDL Ledger with ScalarDL Auditor mode enabled** + + ```properties + # Ledger + scalar.dl.client.server.host= + scalar.dl.ledger.server.port=50051 + scalar.dl.ledger.server.privileged_port=50052 + + # Auditor + scalar.dl.client.auditor.enabled=true + scalar.dl.client.auditor.host= + scalar.dl.auditor.server.port=40051 + scalar.dl.auditor.server.privileged_port=40052 + ``` + +The concrete implementation of the load balancer and access method depend on the Kubernetes cluster. 
If you are using a managed Kubernetes cluster, see the following official documentation based on your cloud service provider: + +* **Amazon Elastic Kubernetes Service (EKS)** + * [Network load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html) +* **Azure Kubernetes Service (AKS)** + * [Use a public standard load balancer in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/load-balancer-standard) + * [Use an internal load balancer with Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/internal-lb) + +## Run client requests to ScalarDB or ScalarDL from a bastion server (for testing purposes only; not recommended in a production environment) + +You can run client requests to ScalarDB or ScalarDL from a bastion server by running the `kubectl port-forward` command. If you create a ScalarDL Auditor mode environment, however, you must run two `kubectl port-forward` commands with different kubeconfig files from one bastion server to access two Kubernetes clusters. + +1. **(ScalarDL Auditor mode only)** In the bastion server for ScalarDL Ledger, configure an existing kubeconfig file or add a new kubeconfig file to access the Kubernetes cluster for ScalarDL Auditor. For details on how to configure the kubeconfig file of each managed Kubernetes cluster, see [Configure kubeconfig](./CreateBastionServer.md#configure-kubeconfig). +2. Configure port forwarding to each service from the bastion server. + * **ScalarDB Server** + + ```console + kubectl port-forward -n svc/-envoy 60051:60051 + ``` + + * **ScalarDL Ledger** + + ```console + kubectl --context port-forward -n svc/-envoy 50051:50051 + kubectl --context port-forward -n svc/-envoy 50052:50052 + ``` + + * **ScalarDL Auditor** + + ```console + kubectl --context port-forward -n svc/-envoy 40051:40051 + kubectl --context port-forward -n svc/-envoy 40052:40052 + ``` + +3. Configure the properties file to access ScalarDB or ScalarDL via `localhost`. + * **Client properties file for ScalarDB Server** + + ```properties + scalar.db.contact_points=localhost + scalar.db.contact_port=60051 + scalar.db.storage=grpc + scalar.db.transaction_manager=grpc + ``` + + * **Client properties file for ScalarDL Ledger** + + ```properties + scalar.dl.client.server.host=localhost + scalar.dl.ledger.server.port=50051 + scalar.dl.ledger.server.privileged_port=50052 + ``` + + * **Client properties file for ScalarDL Ledger with ScalarDL Auditor mode enabled** + + ```properties + # Ledger + scalar.dl.client.server.host=localhost + scalar.dl.ledger.server.port=50051 + scalar.dl.ledger.server.privileged_port=50052 + + # Auditor + scalar.dl.client.auditor.enabled=true + scalar.dl.client.auditor.host=localhost + scalar.dl.auditor.server.port=40051 + scalar.dl.auditor.server.privileged_port=40052 + ``` diff --git a/docs/3.12/scalar-kubernetes/AwsMarketplaceGuide.md b/docs/3.12/scalar-kubernetes/AwsMarketplaceGuide.md new file mode 100644 index 00000000..dcee33ad --- /dev/null +++ b/docs/3.12/scalar-kubernetes/AwsMarketplaceGuide.md @@ -0,0 +1,262 @@ +# How to install Scalar products through AWS Marketplace + +Scalar products (ScalarDB, ScalarDL, and their tools) are available in the AWS Marketplace as container images. This guide explains how to install Scalar products through the AWS Marketplace. 
+ +Note that some Scalar products are available under commercial licenses, and the AWS Marketplace provides those products as pay-as-you-go pricing or a Bring Your Own License (BYOL) option. If you select pay-as-you-go pricing, AWS will charge you our product license fee based on your usage. If you select BYOL, please make sure you have the appropriate license for the product. + +## Subscribe to Scalar products from AWS Marketplace + +1. Access to the AWS Marketplace. + * [ScalarDB Cluster Standard Edition (Pay-As-You-Go)](https://aws.amazon.com/marketplace/pp/prodview-jx6qxatkxuwm4) + * [ScalarDB Cluster Premium Edition (Pay-As-You-Go)](https://aws.amazon.com/marketplace/pp/prodview-djqw3zk6dwyk6) + * [ScalarDB Cluster (BYOL)](https://aws.amazon.com/marketplace/pp/prodview-alcwrmw6v4cfy) + * [ScalarDL Ledger (Pay-As-You-Go)](https://aws.amazon.com/marketplace/pp/prodview-wttioaezp5j6e) + * [ScalarDL Auditor (Pay-As-You-Go)](https://aws.amazon.com/marketplace/pp/prodview-ke3yiw4mhriuu) + * [ScalarDL Ledger (BYOL)](https://aws.amazon.com/marketplace/pp/prodview-3jdwfmqonx7a2) + * [ScalarDL Auditor (BYOL)](https://aws.amazon.com/marketplace/pp/prodview-tj7svy75gu7m6) + * [[Deprecated] ScalarDB Server (BYOL)](https://aws.amazon.com/marketplace/pp/prodview-rzbuhxgvqf4d2) + +1. Select **Continue to Subscribe**. + +1. Sign in to AWS Marketplace using your IAM user. + If you have already signed in, this step will be skipped automatically. + +1. Read the **Terms and Conditions** and select **Accept Terms**. + It takes some time. When it's done, you can see the current date in the **Effective date** column. + Also, you can see our products on the [Manage subscriptions](https://us-east-1.console.aws.amazon.com/marketplace/home#/subscriptions) page of AWS Console. + +## **[Pay-As-You-Go]** Deploy containers on EKS (Amazon Elastic Kubernetes Service) from AWS Marketplace using Scalar Helm Charts + +By subscribing to Scalar products in the AWS Marketplace, you can pull the container images of Scalar products from the private container registry ([ECR](https://aws.amazon.com/ecr/)) of the AWS Marketplace. This section explains how to deploy Scalar products with pay-as-you-go pricing in your [EKS](https://aws.amazon.com/eks/) cluster from the private container registry. + +1. Create an OIDC provider. + + You must create an identity and access management (IAM) OpenID Connect (OIDC) provider to run the AWS Marketplace Metering Service from ScalarDL pods. + + ```console + eksctl utils associate-iam-oidc-provider --region --cluster --approve + ``` + + For details, see [Creating an IAM OIDC provider for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html). + +1. Create a service account. + + To allow your pods to run the AWS Marketplace Metering Service, you can use [IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). + + ```console + eksctl create iamserviceaccount \ + --name \ + --namespace \ + --region \ + --cluster \ + --attach-policy-arn arn:aws:iam::aws:policy/AWSMarketplaceMeteringFullAccess \ + --approve \ + --override-existing-serviceaccounts + ``` + +1. Update the custom values file of the Helm Chart of a Scalar product you want to install. + + You need to specify the private container registry (ECR) of the AWS Marketplace as the value for `[].image.repository` in the custom values file. 
You also need to specify the service account name that you created in the previous step as the value for `[].serviceAccount.serviceAccountName` and set `[].serviceAccount.automountServiceAccountToken` to `true`. + + * ScalarDB Cluster Examples + * ScalarDB Cluster Standard Edition (scalardb-cluster-standard-custom-values.yaml) + ```yaml + scalardbCluster: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardb-cluster-node-aws-payg-standard" + serviceAccount: + serviceAccountName: "" + automountServiceAccountToken: true + ``` + * ScalarDB Cluster Premium Edition (scalardb-cluster-premium-custom-values.yaml) + ```yaml + scalardbCluster: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardb-cluster-node-aws-payg-premium" + serviceAccount: + serviceAccountName: "" + automountServiceAccountToken: true + ``` + * ScalarDL Examples + * ScalarDL Ledger (scalardl-ledger-custom-values.yaml) + ```yaml + ledger: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-ledger-aws-payg" + serviceAccount: + serviceAccountName: "" + automountServiceAccountToken: true + ``` + * ScalarDL Auditor (scalardl-auditor-custom-values.yaml) + ```yaml + auditor: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-auditor-aws-payg" + serviceAccount: + serviceAccountName: "" + automountServiceAccountToken: true + ``` + * ScalarDL Schema Loader for Ledger (schema-loader-ledger-custom-values.yaml) + ```yaml + schemaLoading: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-schema-loader-ledger-aws-payg" + ``` + * ScalarDL Schema Loader for Auditor (schema-loader-auditor-custom-values.yaml) + ```yaml + schemaLoading: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-schema-loader-auditor-aws-payg" + ``` + +1. Deploy Scalar products by using Helm Charts in conjunction with the above custom values files. + * ScalarDB Cluster Examples + * ScalarDB Cluster Standard Edition + ```console + helm install scalardb-cluster-standard scalar-labs/scalardb-cluster -f scalardb-cluster-standard-custom-values.yaml + ``` + * ScalarDB Cluster Premium Edition + ```console + helm install scalardb-cluster-premium scalar-labs/scalardb-cluster -f scalardb-cluster-premium-custom-values.yaml + ``` + * ScalarDL Examples + * ScalarDL Ledger + ```console + helm install scalardl-ledger scalar-labs/scalardl -f ./scalardl-ledger-custom-values.yaml + ``` + * ScalarDL Auditor + ```console + helm install scalardl-auditor scalar-labs/scalardl-audit -f ./scalardl-auditor-custom-values.yaml + ``` + * ScalarDL Schema Loader (Ledger) + ```console + helm install schema-loader scalar-labs/schema-loading -f ./schema-loader-ledger-custom-values.yaml + ``` + * ScalarDL Schema Loader (Auditor) + ```console + helm install schema-loader scalar-labs/schema-loading -f ./schema-loader-auditor-custom-values.yaml + ``` + +## **[BYOL]** Deploy containers on EKS (Amazon Elastic Kubernetes Service) from AWS Marketplace using Scalar Helm Charts + +By subscribing to Scalar products in the AWS Marketplace, you can pull the container images of Scalar products from the private container registry ([ECR](https://aws.amazon.com/ecr/)) of the AWS Marketplace. This section explains how to deploy Scalar products with the BYOL option in your [EKS](https://aws.amazon.com/eks/) cluster from the private container registry. + +1. 
Update the custom values file of the Helm Chart of a Scalar product you want to install. + You need to specify the private container registry (ECR) of AWS Marketplace as the value of `[].image.repository` in the custom values file. + * ScalarDB Cluster Examples + ```yaml + scalardbCluster: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardb-cluster-node-aws-byol" + ``` + * ScalarDL Examples + * ScalarDL Ledger (scalardl-ledger-custom-values.yaml) + ```yaml + ledger: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-ledger" + ``` + * ScalarDL Auditor (scalardl-auditor-custom-values.yaml) + ```yaml + auditor: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-auditor" + ``` + * ScalarDL Schema Loader for Ledger (schema-loader-ledger-custom-values.yaml) + ```yaml + schemaLoading: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-schema-loader-ledger" + ``` + * ScalarDL Schema Loader for Auditor (schema-loader-auditor-custom-values.yaml) + ```yaml + schemaLoading: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-schema-loader-auditor" + ``` + +1. Deploy the Scalar products using the Helm Chart with the above custom values files. + * ScalarDB Cluster Examples + ```console + helm install scalardb-cluster scalar-labs/scalardb-cluster -f scalardb-cluster-custom-values.yaml + ``` + * ScalarDL Examples + * ScalarDL Ledger + ```console + helm install scalardl-ledger scalar-labs/scalardl -f ./scalardl-ledger-custom-values.yaml + ``` + * ScalarDL Auditor + ```console + helm install scalardl-auditor scalar-labs/scalardl-audit -f ./scalardl-auditor-custom-values.yaml + ``` + * ScalarDL Schema Loader (Ledger) + ```console + helm install schema-loader scalar-labs/schema-loading -f ./schema-loader-ledger-custom-values.yaml + ``` + * ScalarDL Schema Loader (Auditor) + ```console + helm install schema-loader scalar-labs/schema-loading -f ./schema-loader-auditor-custom-values.yaml + ``` + +## **[BYOL]** Deploy containers on Kubernetes other than EKS from AWS Marketplace using Scalar Helm Charts + +1. Install the `aws` command according to the [AWS Official Document (Installing or updating the latest version of the AWS CLI)](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html). + +1. Configure the AWS CLI with your credentials according to the [AWS Official Document (Configuration basics)](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html). + +1. Create a `reg-ecr-mp-secrets` secret resource for pulling the container images from the ECR of AWS Marketplace. + ```console + kubectl create secret docker-registry reg-ecr-mp-secrets \ + --docker-server=709825985650.dkr.ecr.us-east-1.amazonaws.com \ + --docker-username=AWS \ + --docker-password=$(aws ecr get-login-password --region us-east-1) + ``` + +1. Update the custom values file of the Helm Chart of a Scalar product you want to install. + You need to specify the private container registry (ECR) of AWS Marketplace as the value of `[].image.repository` in the custom values file. + Also, you need to specify the `reg-ecr-mp-secrets` as the value of `[].imagePullSecrets`. 
+ * ScalarDB Cluster Examples + ```yaml + scalardbCluster: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardb-cluster-node-aws-byol" + imagePullSecrets: + - name: "reg-ecr-mp-secrets" + ``` + * ScalarDL Examples + * ScalarDL Ledger (scalardl-ledger-custom-values.yaml) + ```yaml + ledger: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-ledger" + imagePullSecrets: + - name: "reg-ecr-mp-secrets" + ``` + * ScalarDL Auditor (scalardl-auditor-custom-values.yaml) + ```yaml + auditor: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalar-auditor" + imagePullSecrets: + - name: "reg-ecr-mp-secrets" + ``` + * ScalarDL Schema Loader for Ledger (schema-loader-ledger-custom-values.yaml) + ```yaml + schemaLoading: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-schema-loader-ledger" + imagePullSecrets: + - name: "reg-ecr-mp-secrets" + ``` + * ScalarDL Schema Loader for Auditor (schema-loader-auditor-custom-values.yaml) + ```yaml + schemaLoading: + image: + repository: "709825985650.dkr.ecr.us-east-1.amazonaws.com/scalar/scalardl-schema-loader-auditor" + imagePullSecrets: + - name: "reg-ecr-mp-secrets" + ``` + +1. Deploy the Scalar products using the Helm Chart with the above custom values files. + * Examples + Please refer to the **[BYOL] Deploy containers on EKS (Amazon Elastic Kubernetes Service) from AWS Marketplace using Scalar Helm Charts** section of this document. diff --git a/docs/3.12/scalar-kubernetes/AzureMarketplaceGuide.md b/docs/3.12/scalar-kubernetes/AzureMarketplaceGuide.md new file mode 100644 index 00000000..ae1727e4 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/AzureMarketplaceGuide.md @@ -0,0 +1,163 @@ +# How to install Scalar products through Azure Marketplace + +Scalar products (ScalarDB, ScalarDL, and their tools) are provided in Azure Marketplace as container offers. This guide explains how to install Scalar products through Azure Marketplace. + +Note that some Scalar products are licensed under commercial licenses, and the Azure Marketplace provides them as BYOL (Bring Your Own License). Please make sure you have appropriate licenses. + +## Get Scalar products from Microsoft Azure Marketplace + +1. Access to the Microsoft Azure Marketplace. + * [ScalarDB](https://azuremarketplace.microsoft.com/en/marketplace/apps/scalarinc.scalardb) + * [ScalarDL](https://azuremarketplace.microsoft.com/en/marketplace/apps/scalarinc.scalardl) + +1. Select **Get It Now**. + +1. Sign in to Azure Marketplace using your work email address. + Please use the work email address that is used as an account of Microsoft Azure. + If you have already signed in, this step will be skipped automatically. + +1. Input your information. +Note that **Company** is not required, but please enter it. + +1. Select a **Software plan** you need from the pull-down. + **Software plan** means a combination of the container image and the license. Please select the *Software plan* you use. + +1. Select **Continue**. + After selecting the **Continue**, it automatically moves to the Azure Portal. + +1. Create a private container registry (Azure Container Registry). + Follow the on-screen instructions, please create your private container registry. + The container images of Scalar products will be copied to your private container registry. + +1. Repeat these steps as needed. 
+ You need several container images to run Scalar products on Kubernetes, but Azure Marketplace copies only one container image at a time. So, you need to subscribe to several software plans (repeat subscribe operation) as needed. + * Container images that you need are the following. + * ScalarDB + * ScalarDB Cluster (BYOL) + * [Deprecated] ScalarDB Server Default (2vCPU, 4GiB Memory) + * [Deprecated] ScalarDB GraphQL Server (optional) + * [Deprecated] ScalarDB SQL Server (optional) + * ScalarDL + * ScalarDL Ledger Default (2vCPU, 4GiB Memory) + * ScalarDL Auditor Default (2vCPU, 4GiB Memory) + * The **ScalarDL Auditor** is optional. If you use the **ScalarDL Auditor**, subscribe to it. + * ScalarDL Schema Loader + +Now, you can pull the container images of the Scalar products from your private container registry. +Please refer to the [Azure Container Registry documentation](https://docs.microsoft.com/en-us/azure/container-registry/) for more details about the Azure Container Registry. + +## Deploy containers on AKS (Azure Kubernetes Service) from your private container registry using Scalar Helm Charts + +1. Specify your private container registry (Azure Container Registry) when you create an AKS cluster. + * GUI (Azure Portal) + At the **Azure Container Registry** parameter in the **Integrations** tab, please specify your private container registry. + * CLI ([az aks create](https://docs.microsoft.com/en-us/cli/azure/aks?view=azure-cli-latest#az-aks-create) command) + Please specify `--attach-acr` flag with the name of your private container registry. Also, you can configure Azure Container Registry integration for existing AKS clusters using [az aks update](https://docs.microsoft.com/en-us/cli/azure/aks?view=azure-cli-latest#az-aks-update) command with `--attach-acr` flag. Please refer to the [Azure Official Document](https://docs.microsoft.com/en-us/azure/aks/cluster-container-registry-integration) for more details. + +1. Update the custom values file of the Helm Chart of a Scalar product you want to install. + You need to specify your private container registry as the value of `[].image.repository` in the custom values file. + * ScalarDB Cluster Examples + ```yaml + scalardbCluster: + image: + repository: "example.azurecr.io/scalarinc/scalardb-cluster-node-azure-byol" + ``` + * ScalarDL Examples + * ScalarDL Ledger (scalardl-ledger-custom-values.yaml) + ```yaml + ledger: + image: + repository: "example.azurecr.io/scalarinc/scalar-ledger" + ``` + * ScalarDL Auditor (scalardl-auditor-custom-values.yaml) + ```yaml + auditor: + image: + repository: "example.azurecr.io/scalarinc/scalar-auditor" + ``` + * ScalarDL Schema Loader (schema-loader-custom-values.yaml) + ```yaml + schemaLoading: + image: + repository: "example.azurecr.io/scalarinc/scalardl-schema-loader" + ``` + +1. Deploy the Scalar product using the Helm Chart with the above custom values file. 
+ * ScalarDB Cluster Examples + ```console + helm install scalardb-cluster scalar-labs/scalardb-cluster -f scalardb-cluster-custom-values.yaml + ``` + * ScalarDL Examples + * ScalarDL Ledger + ```console + helm install scalardl-ledger scalar-labs/scalardl -f ./scalardl-ledger-custom-values.yaml + ``` + * ScalarDL Auditor + ```console + helm install scalardl-auditor scalar-labs/scalardl-audit -f ./scalardl-auditor-custom-values.yaml + ``` + * ScalarDL Schema Loader + ```console + helm install schema-loader scalar-labs/schema-loading -f ./schema-loader-custom-values.yaml + ``` + +## Deploy containers on Kubernetes other than AKS (Azure Kubernetes Service) from your private container registry using Scalar Helm Charts + +1. Install the `az` command according to the [Azure Official Document (How to install the Azure CLI)](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli). + +1. Sign in with Azure CLI. + ```console + az login + ``` + +1. Create a **service principal** for authentication to your private container registry according to the [Azure Official Document (Azure Container Registry authentication with service principals)](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-auth-service-principal). + We use the **Service principal ID** and the **Service principal password** in the next step. + +1. Create a `reg-acr-secrets` secret resource for pulling the container images from your private container registry. + ```console + kubectl create secret docker-registry reg-acr-secrets \ + --docker-server= \ + --docker-username= \ + --docker-password= + ``` + +1. Update the custom values file of the Helm Chart of a Scalar product you want to install. + You need to specify your private container registry as the value of `[].image.repository` in the custom values file. + Also, you need to specify the `reg-acr-secrets` as the value of `[].imagePullSecrets`. + * ScalarDB Cluster Examples + ```yaml + scalardbCluster: + image: + repository: "example.azurecr.io/scalarinc/scalardb-cluster-node-azure-byol" + imagePullSecrets: + - name: "reg-acr-secrets" + ``` + * ScalarDL Examples + * ScalarDL Ledger (scalardl-ledger-custom-values.yaml) + ```yaml + ledger: + image: + repository: "example.azurecr.io/scalarinc/scalar-ledger" + imagePullSecrets: + - name: "reg-acr-secrets" + ``` + * ScalarDL Auditor (scalardl-auditor-custom-values.yaml) + ```yaml + auditor: + image: + repository: "example.azurecr.io/scalarinc/scalar-auditor" + imagePullSecrets: + - name: "reg-acr-secrets" + ``` + * ScalarDL Schema Loader (schema-loader-custom-values.yaml) + ```yaml + schemaLoading: + image: + repository: "example.azurecr.io/scalarinc/scalardl-schema-loader" + imagePullSecrets: + - name: "reg-acr-secrets" + ``` + +1. Deploy the Scalar product using the Helm Chart with the above custom values file. + * Examples + Please refer to the **Deploy containers on AKS (Azure Kubernetes Service) from your private container registry using Scalar Helm Charts** section of this document. diff --git a/docs/3.12/scalar-kubernetes/BackupNoSQL.md b/docs/3.12/scalar-kubernetes/BackupNoSQL.md new file mode 100644 index 00000000..b3817e18 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/BackupNoSQL.md @@ -0,0 +1,148 @@ +# Back up a NoSQL database in a Kubernetes environment + +This guide explains how to create a transactionally consistent backup of managed databases that ScalarDB or ScalarDL uses in a Kubernetes environment. 
Please note that, when using a NoSQL database or multiple databases, you **must** pause ScalarDB or ScalarDL to create a transactionally consistent backup. + +For details on how ScalarDB backs up databases, see [A Guide on How to Backup and Restore Databases Used Through ScalarDB](https://github.com/scalar-labs/scalardb/blob/master/docs/backup-restore.md). + +In this guide, we assume that you are using point-in-time recovery (PITR) or its equivalent features. Therefore, we must create a period where there are no ongoing transactions for restoration. You can then restore data to that specific period by using PITR. If you restore data to a time without creating a period where there are no ongoing transactions, the restored data could be transactionally inconsistent, causing ScalarDB or ScalarDL to not work properly with the data. + +## Create a period to restore data, and perform a backup + +1. Check the following four points by running the `kubectl get pod` command before starting the backup operation: + * **The number of ScalarDB or ScalarDL pods.** Write down the number of pods so that you can compare that number with the number of pods after performing the backup. + * **The ScalarDB or ScalarDL pod names in the `NAME` column.** Write down the pod names so that you can compare those names with the pod names after performing the backup. + * **The ScalarDB or ScalarDL pod status is `Running` in the `STATUS` column.** Confirm that the pods are running before proceeding with the backup. You will need to pause the pods in the next step. + * **The restart count of each pod in the `RESTARTS` column.** Write down the restart count of each pod so that you can compare the count with the restart counts after performing the backup. +2. Pause the ScalarDB or ScalarDL pods by using `scalar-admin`. For details on how to pause the pods, see the [Details on using `scalar-admin`](./BackupNoSQL.md#details-on-using-scalar-admin) section in this guide. +3. Write down the `pause completed` time. You will need to refer to that time when restoring the data by using the PITR feature. +4. Back up each database by using the backup feature. If you have enabled the automatic backup and PITR features, the managed databases will perform back up automatically. Please note that you should wait for approximately 10 seconds so that you can create a sufficiently long period to avoid a clock skew issue between the client clock and the database clock. This 10-second period is the exact period in which you can restore data by using the PITR feature. +5. Unpause ScalarDB or ScalarDL pods by using `scalar-admin`. For details on how to unpause the pods, see the [Details on using `scalar-admin`](./BackupNoSQL.md#details-on-using-scalar-admin) section in this guide. +6. Check the `unpause started` time. You must check the `unpause started` time to confirm the exact period in which you can restore data by using the PITR feature. +7. Check the pod status after performing the backup. You must check the following four points by using the `kubectl get pod` command after the backup operation is completed. + * **The number of ScalarDB or ScalarDL pods.** Confirm this number matches the number of pods that you wrote down before performing the backup. + * **The ScalarDB or ScalarDL pod names in the `NAME` column.** Confirm the names match the pod names that you wrote down before performing the backup. 
+   * **The ScalarDB or ScalarDL pod status is `Running` in the `STATUS` column.** Confirm that the pods are running.
+   * **The restart count of each pod in the `RESTARTS` column.** Confirm the counts match the restart counts that you wrote down before performing the backup.
+
+   **If any of these values differ from the values you wrote down before performing the backup, you must retry the backup operation from the beginning.** The difference may be caused by pods being added or restarted while the backup was being performed. In such a case, those pods will run in the `unpause` state, and pods in the `unpause` state will cause the backup data to be transactionally inconsistent.
+8. **(Amazon DynamoDB only)** If you use the PITR feature of DynamoDB, you will need to perform additional steps to create a backup because the feature restores data to a table with a different name by using PITR. For details on the additional steps after creating the exact period in which you can restore the data, please see [Restore databases in a Kubernetes environment](./RestoreDatabase.md#amazon-dynamodb).
+
+## Back up multiple databases
+
+If you have two or more databases that the [Multi-storage Transactions](https://github.com/scalar-labs/scalardb/blob/master/docs/multi-storage-transactions.md) or [Two-phase Commit Transactions](https://github.com/scalar-labs/scalardb/blob/master/docs/two-phase-commit-transactions.md) feature uses, you must pause all instances of ScalarDB or ScalarDL and create the same period where no ongoing transactions exist in the databases.
+
+To ensure consistency between multiple databases, you must restore the databases to the same point in time by using the PITR feature.
+
+## Details on using `scalar-admin`
+
+### Check the Kubernetes resource name
+
+You must specify the SRV service URL with the `-s (--srv-service-url)` flag. In Kubernetes environments, the format of the SRV service URL is `_my-port-name._my-port-protocol.my-svc.my-namespace.svc.cluster.local`.
+
+If you use Scalar Helm Charts to deploy ScalarDB or ScalarDL, `my-svc` and `my-namespace` may vary depending on your environment. You must specify the headless service name as `my-svc` and the namespace as `my-namespace`.
+
+* Example
+  * ScalarDB Server
+
+    ```console
+    _scalardb._tcp.<helm release name>-headless.<namespace>.svc.cluster.local
+    ```
+
+  * ScalarDL Ledger
+
+    ```console
+    _scalardl-admin._tcp.<helm release name>-headless.<namespace>.svc.cluster.local
+    ```
+
+  * ScalarDL Auditor
+
+    ```console
+    _scalardl-auditor-admin._tcp.<helm release name>-headless.<namespace>.svc.cluster.local
+    ```
+
+The helm release name decides the headless service name `<helm release name>-headless`. You can see the helm release name by running the `helm list` command.
+
+```console
+$ helm list -n ns-scalar
+NAME               NAMESPACE   REVISION   UPDATED                                   STATUS     CHART                  APP VERSION
+scalardb           ns-scalar   1          2023-02-09 19:31:40.527130674 +0900 JST   deployed   scalardb-2.5.0         3.8.0
+scalardl-auditor   ns-scalar   1          2023-02-09 19:32:03.008986045 +0900 JST   deployed   scalardl-audit-2.5.1   3.7.1
+scalardl-ledger    ns-scalar   1          2023-02-09 19:31:53.459548418 +0900 JST   deployed   scalardl-4.5.1         3.7.1
+```
+
+You can also see the headless service name `<helm release name>-headless` by running the `kubectl get service` command.
+
+```console
+$ kubectl get service -n ns-scalar
+NAME                             TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)                           AGE
+scalardb-envoy                   LoadBalancer   10.99.245.143    <pending>     60051:31110/TCP                   2m2s
+scalardb-envoy-metrics           ClusterIP      10.104.56.87     <none>        9001/TCP                          2m2s
+scalardb-headless                ClusterIP      None             <none>        60051/TCP                         2m2s
+scalardb-metrics                 ClusterIP      10.111.213.194   <none>        8080/TCP                          2m2s
+scalardl-auditor-envoy           LoadBalancer   10.111.141.43    <pending>     40051:31553/TCP,40052:31171/TCP   99s
+scalardl-auditor-envoy-metrics   ClusterIP      10.104.245.188   <none>        9001/TCP                          99s
+scalardl-auditor-headless        ClusterIP      None             <none>        40051/TCP,40053/TCP,40052/TCP     99s
+scalardl-auditor-metrics         ClusterIP      10.105.119.158   <none>        8080/TCP                          99s
+scalardl-ledger-envoy            LoadBalancer   10.96.239.167    <pending>     50051:32714/TCP,50052:30857/TCP   109s
+scalardl-ledger-envoy-metrics    ClusterIP      10.97.204.18     <none>        9001/TCP                          109s
+scalardl-ledger-headless         ClusterIP      None             <none>        50051/TCP,50053/TCP,50052/TCP     109s
+scalardl-ledger-metrics          ClusterIP      10.104.216.189   <none>        8080/TCP                          109s
+```
+
+### Pause
+
+You can send a pause request to ScalarDB or ScalarDL pods in a Kubernetes environment.
+
+* Example
+  * ScalarDB Server
+
+    ```console
+    kubectl run scalar-admin-pause --image=ghcr.io/scalar-labs/scalar-admin:<tag> --restart=Never -it -- -c pause -s _scalardb._tcp.<helm release name>-headless.<namespace>.svc.cluster.local
+    ```
+
+  * ScalarDL Ledger
+
+    ```console
+    kubectl run scalar-admin-pause --image=ghcr.io/scalar-labs/scalar-admin:<tag> --restart=Never -it -- -c pause -s _scalardl-admin._tcp.<helm release name>-headless.<namespace>.svc.cluster.local
+    ```
+
+  * ScalarDL Auditor
+
+    ```console
+    kubectl run scalar-admin-pause --image=ghcr.io/scalar-labs/scalar-admin:<tag> --restart=Never -it -- -c pause -s _scalardl-auditor-admin._tcp.<helm release name>-headless.<namespace>.svc.cluster.local
+    ```
+
+### Unpause
+
+You can send an unpause request to ScalarDB or ScalarDL pods in a Kubernetes environment.
+
+* Example
+  * ScalarDB Server
+
+    ```console
+    kubectl run scalar-admin-unpause --image=ghcr.io/scalar-labs/scalar-admin:<tag> --restart=Never -it -- -c unpause -s _scalardb._tcp.<helm release name>-headless.<namespace>.svc.cluster.local
+    ```
+
+  * ScalarDL Ledger
+
+    ```console
+    kubectl run scalar-admin-unpause --image=ghcr.io/scalar-labs/scalar-admin:<tag> --restart=Never -it -- -c unpause -s _scalardl-admin._tcp.<helm release name>-headless.<namespace>.svc.cluster.local
+    ```
+
+  * ScalarDL Auditor
+
+    ```console
+    kubectl run scalar-admin-unpause --image=ghcr.io/scalar-labs/scalar-admin:<tag> --restart=Never -it -- -c unpause -s _scalardl-auditor-admin._tcp.<helm release name>-headless.<namespace>.svc.cluster.local
+    ```
+
+### Check the `pause completed` time and `unpause started` time
+
+The `scalar-admin` pods output the `pause completed` time and `unpause started` time to stdout. You can also see those times by running the `kubectl logs` command.
+
+```console
+kubectl logs scalar-admin-pause
+```
+
+```console
+kubectl logs scalar-admin-unpause
+```
diff --git a/docs/3.12/scalar-kubernetes/BackupRDB.md b/docs/3.12/scalar-kubernetes/BackupRDB.md
new file mode 100644
index 00000000..37af70f4
--- /dev/null
+++ b/docs/3.12/scalar-kubernetes/BackupRDB.md
@@ -0,0 +1,14 @@
+# Back up an RDB in a Kubernetes environment
+
+This guide explains how to create a backup of a single relational database (RDB) that ScalarDB or ScalarDL uses in a Kubernetes environment. Please note that this guide assumes that you are using a managed database from a cloud services provider.
+ +If you have two or more RDBs that the [Multi-storage Transactions](https://github.com/scalar-labs/scalardb/blob/master/docs/multi-storage-transactions.md) or [Two-phase Commit Transactions](https://github.com/scalar-labs/scalardb/blob/master/docs/two-phase-commit-transactions.md) feature uses, you must follow the instructions in [Back up a NoSQL database in a Kubernetes environment](./BackupNoSQL.md) instead. + +## Perform a backup + +To perform backups, you should enable the automated backup feature available in the managed databases. By enabling this feature, you do not need to perform any additional backup operations. For details on the backup configurations in each managed database, see the following guides: + +* [Set up a database for ScalarDB/ScalarDL deployment on AWS](./SetupDatabaseForAWS.md) +* [Set up a database for ScalarDB/ScalarDL deployment on Azure](./SetupDatabaseForAzure.md) + +Because the managed RDB keeps backup data consistent from a transactions perspective, you can restore backup data to any point in time by using the point-in-time recovery (PITR) feature in the managed RDB. diff --git a/docs/3.12/scalar-kubernetes/BackupRestoreGuide.md b/docs/3.12/scalar-kubernetes/BackupRestoreGuide.md new file mode 100644 index 00000000..1ae736d8 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/BackupRestoreGuide.md @@ -0,0 +1,41 @@ +# Back up and restore ScalarDB or ScalarDL data in a Kubernetes environment + +This guide explains how to backup and restore ScalarDB or ScalarDL data in a Kubernetes environment. Please note that this guide assumes that you are using a managed database from a cloud services provider as the backend database for ScalarDB or ScalarDL. The following is a list of the managed databases that this guide assumes you might be using: + +* NoSQL: does not support transactions + * Amazon DynamoDB + * Azure Cosmos DB for NoSQL +* Relational database (RDB): supports transactions + * Amazon RDS + * MySQL + * Oracle + * PostgreSQL + * SQL Server + * Amazon Aurora + * MySQL + * PostgreSQL + * Azure Database + * MySQL + * PostgreSQL + +For details on how to back up and restore databases used with ScalarDB in a transactionally consistent way, see [A Guide on How to Backup and Restore Databases Used Through ScalarDB](https://github.com/scalar-labs/scalardb/blob/master/docs/backup-restore.md). + +## Perform a backup + +### Confirm the type of database and number of databases you are using + +How you perform backup and restore depends on the type of database (NoSQL or RDB) and the number of databases you are using. + +#### NoSQL or multiple databases + +If you are using a NoSQL database, or if you have two or more databases that the [Multi-storage Transactions](https://github.com/scalar-labs/scalardb/blob/master/docs/multi-storage-transactions.md) or [Two-phase Commit Transactions](https://github.com/scalar-labs/scalardb/blob/master/docs/two-phase-commit-transactions.md) feature uses, please see [Back up a NoSQL database in a Kubernetes environment](./BackupNoSQL.md) for details on how to perform a backup. + +#### Single RDB + +If you are using a single RDB, please see [Back up an RDB in a Kubernetes environment](./BackupRDB.md) for details on how to perform a backup. 
+ +If you have two or more RDBs that the [Multi-storage Transactions](https://github.com/scalar-labs/scalardb/blob/master/docs/multi-storage-transactions.md) or [Two-phase Commit Transactions](https://github.com/scalar-labs/scalardb/blob/master/docs/two-phase-commit-transactions.md) feature uses, you must follow the instructions in [Back up a NoSQL database in a Kubernetes environment](./BackupNoSQL.md) instead. + +## Restore a database + +For details on how to restore data from a managed database, please see [Restore databases in a Kubernetes environment](./RestoreDatabase.md). diff --git a/docs/3.12/scalar-kubernetes/CreateAKSClusterForScalarDB.md b/docs/3.12/scalar-kubernetes/CreateAKSClusterForScalarDB.md new file mode 100644 index 00000000..2b645e11 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/CreateAKSClusterForScalarDB.md @@ -0,0 +1,100 @@ +# Guidelines for creating an AKS cluster for ScalarDB Server + +This document explains the requirements and recommendations for creating an Azure Kubernetes Service (AKS) cluster for ScalarDB Server deployment. For details on how to deploy ScalarDB Server on an AKS cluster, see [Deploy ScalarDB Server on AKS](./ManualDeploymentGuideScalarDBServerOnAKS.md). + +## Before you begin + +You must create an AKS cluster based on the following requirements, recommendations, and your project's requirements. For specific details about how to create an AKS cluster, refer to the following official Microsoft documentation based on the tool you use in your environment: + +* [Azure CLI](https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-cli) +* [PowerShell](https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-powershell) +* [Azure portal](https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-portal) + +## Requirements + +When deploying ScalarDB Server, you must: + +* Create the AKS cluster by using Kubernetes version 1.21 or higher. +* Configure the AKS cluster based on the version of Kubernetes and your project's requirements. + +## Recommendations (optional) + +The following are some recommendations for deploying ScalarDB Server. These recommendations are not required, so you can choose whether or not to apply these recommendations based on your needs. + +### Create at least three worker nodes and three pods + +To ensure that the AKS cluster has high availability, you should use at least three worker nodes and deploy at least three pods spread across the worker nodes. You can see the [sample configurations](../conf/scalardb-custom-values.yaml) of `podAntiAffinity` for making three pods spread across the worker nodes. + +{% capture notice--info %} +**Note** + +If you place the worker nodes in different [availability zones](https://learn.microsoft.com/en-us/azure/availability-zones/az-overview) (AZs), you can withstand an AZ failure. +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
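+
+For reference, a minimal sketch of the kind of `podAntiAffinity` rule used in the sample configuration linked above might look like the following (the label key, value, and weight are illustrative and must match the labels that your Helm chart actually sets on the ScalarDB Server pods):
+
+```yaml
+affinity:
+  podAntiAffinity:
+    preferredDuringSchedulingIgnoredDuringExecution:
+      - weight: 50
+        podAffinityTerm:
+          labelSelector:
+            matchExpressions:
+              # Prefer not to schedule two ScalarDB Server pods on the same node.
+              - key: app.kubernetes.io/name
+                operator: In
+                values:
+                  - scalardb
+          topologyKey: kubernetes.io/hostname
+```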
+ +### Use 4vCPU / 8GB memory nodes for the worker node in the ScalarDB Server node pool + +From the perspective of commercial licenses, resources for one pod running ScalarDB Server are limited to 2vCPU / 4GB memory. In addition to the ScalarDB Server pod, Kubernetes could deploy some of the following components to each worker node: + +* ScalarDB Server pod (2vCPU / 4GB) +* Envoy proxy +* Your application pods (if you choose to run your application's pods on the same worker node) +* Monitoring components (if you deploy monitoring components such as `kube-prometheus-stack`) +* Kubernetes components + +With this in mind, you should use a worker node that has at least 4vCPU / 8GB memory resources and use at least three worker nodes for availability, as mentioned in [Create at least three worker nodes and three pods](#create-at-least-three-worker-nodes-and-three-pods). + +However, three nodes with at least 4vCPU / 8GB memory resources per node is the minimum for production environment. You should also consider the resources of the AKS cluster (for example, the number of worker nodes, vCPUs per node, memory per node, ScalarDB Server pods, and pods for your application), which depend on your system's workload. In addition, if you plan to scale the pods automatically by using some features like [Horizontal Pod Autoscaling (HPA)](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/), you should consider the maximum number of pods on the worker node when deciding the worker node resources. + +### Create a node pool for ScalarDB Server pods + +AKS creates one system node pool named **agentpool** that is preferred for system pods (used to keep AKS running) by default. We recommend creating another node pool with **user** mode for ScalarDB Server pods and deploying ScalarDB Server pods on this additional node pool. + +### Configure cluster autoscaler in AKS + +If you want to scale ScalarDB Server pods automatically by using [Horizontal Pod Autoscaler](https://learn.microsoft.com/en-us/azure/aks/concepts-scale#horizontal-pod-autoscaler), you should configure cluster autoscaler in AKS too. For details, refer to the official Microsoft documentation at [Cluster autoscaler](https://learn.microsoft.com/en-us/azure/aks/concepts-scale#cluster-autoscaler). + +In addition, if you configure cluster autoscaler, you should create a subnet in a virtual network (VNet) for AKS to ensure a sufficient number of IPs exist so that AKS can work without network issues after scaling. The required number of IPs varies depending on the networking plug-in. For more details about the number of IPs required, refer to the following: + +* [Use kubenet networking with your own IP address ranges in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/configure-kubenet) +* [Configure Azure CNI networking in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni) + +### Create the AKS cluster on a private network + +You should create the AKS cluster on a private network (private subnet in a VNet) since ScalarDB Server does not provide any services to users directly via internet access. We recommend accessing ScalarDB Server via a private network from your applications. + +### Create the AKS cluster by using Azure CNI, if necessary + +The AKS default networking plug-in is [kubenet](https://learn.microsoft.com/en-us/azure/aks/configure-kubenet). 
If your requirement does not match kubenet, you should use [Azure Container Networking Interface (CNI)](https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni). + +For example, if you want to deploy multiple ScalarDB Server environments on one AKS cluster (e.g., deploy a multi-tenant ScalarDB Server) and you want to control the connection between each tenant by using [Kubernetes NetworkPolicies](https://kubernetes.io/docs/concepts/services-networking/network-policies/), kubenet supports only the Calico Network Policy, which the [Azure support team does not support](https://learn.microsoft.com/en-us/azure/aks/use-network-policies#differences-between-azure-network-policy-manager-and-calico-network-policy-and-their-capabilities). Please note that the Calico Network Policy is supported only by the Calico community or through additional paid support. + +The Azure support and engineering teams, however, do support Azure CNI. So, if you want to use Kubernetes NetworkPolicies and receive support from the Azure support team, you should use Azure CNI. For more details about the differences between kubenet and Azure CNI, refer to the following official Microsoft documentation: + +* [Network concepts for applications in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/concepts-network) +* [Use kubenet networking with your own IP address ranges in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/configure-kubenet) +* [Configure Azure CNI networking in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni) + +### Restrict connections by using some security features based on your requirements + +You should restrict unused connections in ScalarDB Server. To restrict unused connections, you can use some security features in Azure, like [network security groups](https://learn.microsoft.com/en-us/azure/virtual-network/network-security-groups-overview). + +The connections (ports) that ScalarDB Server uses by default are as follows: + +* ScalarDB Server + * 60051/TCP (accepts requests from a client) + * 8080/TCP (accepts monitoring requests) +* Scalar Envoy (used with ScalarDB Server) + * 60051/TCP (load balancing for ScalarDB Server) + * 9001/TCP (accepts monitoring requests for Scalar Envoy itself) + +{% capture notice--info %} +**Note** + +- If you change the default listening port for ScalarDB Server in the configuration file (`database.properties`), you must allow connections by using the port that you configured. +- You must also allow the connections that AKS uses itself. For more details about AKS traffic requirements, refer to [Control egress traffic using Azure Firewall in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/limit-egress-traffic). +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
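+
+As one hedged example of such a restriction, the following Azure CLI sketch adds a network security group rule that allows only the ScalarDB Server ports listed above from your application subnet (the resource group, NSG name, and source CIDR are placeholders for illustration):
+
+```console
+az network nsg rule create \
+  --resource-group <your resource group> \
+  --nsg-name <NSG attached to the AKS subnet> \
+  --name AllowScalarDBServerPorts \
+  --priority 200 \
+  --direction Inbound \
+  --access Allow \
+  --protocol Tcp \
+  --source-address-prefixes <your application subnet CIDR> \
+  --destination-port-ranges 60051 8080 9001
+```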
+ diff --git a/docs/3.12/scalar-kubernetes/CreateAKSClusterForScalarDL.md b/docs/3.12/scalar-kubernetes/CreateAKSClusterForScalarDL.md new file mode 100644 index 00000000..242e9877 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/CreateAKSClusterForScalarDL.md @@ -0,0 +1,109 @@ +# Guidelines for creating an AKS cluster for ScalarDL Ledger + +This document explains the requirements and recommendations for creating an Azure Kubernetes Service (AKS) cluster for ScalarDL Ledger deployment. For details on how to deploy ScalarDL Ledger on an AKS cluster, see [Deploy ScalarDL Ledger on AKS](./ManualDeploymentGuideScalarDLOnAKS.md). + +## Before you begin + +You must create an AKS cluster based on the following requirements, recommendations, and your project's requirements. For specific details about how to create an AKS cluster, refer to the following official Microsoft documentation based on the tool you use in your environment: + +* [Azure CLI](https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-cli) +* [PowerShell](https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-powershell) +* [Azure portal](https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-portal) + +## Requirements + +When deploying ScalarDL Ledger, you must: + +* Create the AKS cluster by using Kubernetes version 1.21 or higher. +* Configure the AKS cluster based on the version of Kubernetes and your project's requirements. + +{% capture notice--warning %} +**Attention** + +For Byzantine fault detection in ScalarDL to work properly, do not deploy your application pods on the same AKS cluster as the ScalarDL Ledger deployment. +{% endcapture %} + +
+<div class="notice--warning">{{ notice--warning | markdownify }}</div>
+ +## Recommendations (optional) + +The following are some recommendations for deploying ScalarDL Ledger. These recommendations are not required, so you can choose whether or not to apply these recommendations based on your needs. + +### Create at least three worker nodes and three pods + +To ensure that the AKS cluster has high availability, you should use at least three worker nodes and deploy at least three pods spread across the worker nodes. You can see the [sample configurations](../conf/scalardl-custom-values.yaml) of `podAntiAffinity` for making three pods spread across the worker nodes. + +{% capture notice--info %} +**Note** + +If you place the worker nodes in different [availability zones](https://learn.microsoft.com/en-us/azure/availability-zones/az-overview) (AZs), you can withstand an AZ failure. +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +### Use 4vCPU / 8GB memory nodes for the worker node in the ScalarDL Ledger node pool + +From the perspective of commercial licenses, resources for one pod running ScalarDL Ledger are limited to 2vCPU / 4GB memory. In addition to the ScalarDL Ledger pod, Kubernetes could deploy some of the following components to each worker node: + +* ScalarDL Ledger pod (2vCPU / 4GB) +* Envoy proxy +* Monitoring components (if you deploy monitoring components such as `kube-prometheus-stack`) +* Kubernetes components + +With this in mind, you should use a worker node that has at least 4vCPU / 8GB memory resources and use at least three worker nodes for availability, as mentioned in [Create at least three worker nodes and three pods](#create-at-least-three-worker-nodes-and-three-pods). + +However, three nodes with at least 4vCPU / 8GB memory resources per node is the minimum environment for production. You should also consider the resources of the AKS cluster (for example, the number of worker nodes, vCPUs per node, memory per node, and ScalarDL Ledger pods), which depend on your system's workload. In addition, if you plan to scale the pods automatically by using some features like [Horizontal Pod Autoscaling (HPA)](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/), you should consider the maximum number of pods on the worker node when deciding the worker node resources. + +### Create a node pool for ScalarDL Ledger pods + +AKS creates one system node pool named **agentpool** that is preferred for system pods (used to keep AKS running) by default. We recommend creating another node pool with **user** mode for ScalarDL Ledger pods and deploying ScalarDL Ledger pods on this additional node pool. + +### Configure cluster autoscaler in AKS + +If you want to scale ScalarDL Ledger pods automatically by using [Horizontal Pod Autoscaler](https://learn.microsoft.com/en-us/azure/aks/concepts-scale#horizontal-pod-autoscaler), you should configure cluster autoscaler in AKS too. For details, refer to the official Microsoft documentation at [Cluster autoscaler](https://learn.microsoft.com/en-us/azure/aks/concepts-scale#cluster-autoscaler). + +In addition, if you configure cluster autoscaler, you should create a subnet in a virtual network (VNet) for AKS to ensure a sufficient number of IPs exist so that AKS can work without network issues after scaling. The required number of IPs varies depending on the networking plug-in. For more details about the number of IPs required, refer to the following: + +* [Use kubenet networking with your own IP address ranges in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/configure-kubenet) +* [Configure Azure CNI networking in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni) + +### Create the AKS cluster on a private network + +You should create the AKS cluster on a private network (private subnet in a VNet) since ScalarDL Ledger does not provide any services to users directly via internet access. We recommend accessing ScalarDL Ledger via a private network from your applications. + +### Create the AKS cluster by using Azure CNI, if necessary + +The AKS default networking plug-in is [kubenet](https://learn.microsoft.com/en-us/azure/aks/configure-kubenet). If your requirement does not match kubenet, you should use [Azure Container Networking Interface (CNI)](https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni). 
+ +For example, if you want to deploy multiple ScalarDL Ledger environments on one AKS cluster (e.g., deploy multi-tenant ScalarDL Ledger) and you want to control the connection between each tenant by using [Kubernetes NetworkPolicies](https://kubernetes.io/docs/concepts/services-networking/network-policies/), kubenet supports only the Calico Network Policy, which the [Azure support team does not support](https://learn.microsoft.com/en-us/azure/aks/use-network-policies#differences-between-azure-network-policy-manager-and-calico-network-policy-and-their-capabilities). Please note that the Calico Network Policy is supported only by the Calico community or through additional paid support. + +The Azure support and engineering teams, however, do support Azure CNI. So, if you want to use Kubernetes NetworkPolicies and receive support from the Azure support team, you should use Azure CNI. For more details about the differences between kubenet and Azure CNI, refer to the following official Microsoft documentation: + +* [Network concepts for applications in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/concepts-network) +* [Use kubenet networking with your own IP address ranges in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/configure-kubenet) +* [Configure Azure CNI networking in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni) + +### Restrict connections by using some security features based on your requirements + +You should restrict unused connections in ScalarDL Ledger. To restrict unused connections, you can use some security features in Azure, like [network security groups](https://learn.microsoft.com/en-us/azure/virtual-network/network-security-groups-overview). + +The connections (ports) that ScalarDL Ledger uses by default are as follows: + +* ScalarDL Ledger + * 50051/TCP (accepts requests from a client) + * 50052/TCP (accepts privileged requests from a client) + * 50053/TCP (accepts pause and unpause requests from a scalar-admin client tool) + * 8080/TCP (accepts monitoring requests) +* Scalar Envoy (used with ScalarDL Ledger) + * 50051/TCP (load balancing for ScalarDL Ledger) + * 50052/TCP (load balancing for ScalarDL Ledger) + * 9001/TCP (accepts monitoring requests for Scalar Envoy itself) + +{% capture notice--info %} +**Note** + +- If you change the default listening port for ScalarDL Ledger in the configuration file (`ledger.properties`), you must allow connections by using the port that you configured. +- You must also allow the connections that AKS uses itself. For more details about AKS traffic requirements, refer to [Control egress traffic using Azure Firewall in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/limit-egress-traffic). +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
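+
+As an illustration of the node pool and node size recommendations above, a user-mode node pool for ScalarDL Ledger pods could be added with an Azure CLI sketch like the following (the resource group, cluster name, pool name, and VM size are placeholders; any size with at least 4vCPU / 8GB memory works):
+
+```console
+az aks nodepool add \
+  --resource-group <your resource group> \
+  --cluster-name <your AKS cluster> \
+  --name scalardlpool \
+  --mode User \
+  --node-count 3 \
+  --node-vm-size Standard_D4s_v3 \
+  --zones 1 2 3
+```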
diff --git a/docs/3.12/scalar-kubernetes/CreateAKSClusterForScalarDLAuditor.md b/docs/3.12/scalar-kubernetes/CreateAKSClusterForScalarDLAuditor.md new file mode 100644 index 00000000..58a4919a --- /dev/null +++ b/docs/3.12/scalar-kubernetes/CreateAKSClusterForScalarDLAuditor.md @@ -0,0 +1,128 @@ +# Guidelines for creating an AKS cluster for ScalarDL Ledger and ScalarDL Auditor + +This document explains the requirements and recommendations for creating an Azure Kubernetes Service (AKS) cluster for ScalarDL Ledger and ScalarDL Auditor deployment. For details on how to deploy ScalarDL Ledger and ScalarDL Auditor on an AKS cluster, see [Deploy ScalarDL Ledger and ScalarDL Auditor on AKS](./ManualDeploymentGuideScalarDLAuditorOnAKS.md). + +## Before you begin + +You must create an AKS cluster based on the following requirements, recommendations, and your project's requirements. For specific details about how to create an AKS cluster, refer to the following official Microsoft documentation based on the tool you use in your environment: + +* [Azure CLI](https://learn.microsoft.com/ja-jp/azure/aks/learn/quick-kubernetes-deploy-cli) +* [PowerShell](https://learn.microsoft.com/ja-jp/azure/aks/learn/quick-kubernetes-deploy-powershell) +* [Azure portal](https://learn.microsoft.com/ja-jp/azure/aks/learn/quick-kubernetes-deploy-portal) + +## Requirements + +When deploying ScalarDL Ledger and ScalarDL Auditor, you must: + +* Create two AKS clusters by using Kubernetes version 1.21 or higher. + * One AKS cluster for ScalarDL Ledger + * One AKS cluster for ScalarDL Auditor +* Configure the AKS clusters based on the version of Kubernetes and your project's requirements. +* Configure a virtual network (VNet) as follows. + * Connect the **VNet of AKS (for Ledger)** and the **VNet of AKS (for Auditor)** by using [virtual network peering](https://learn.microsoft.com/en-us/azure/virtual-network/virtual-network-manage-peering). To do so, you must specify the different IP ranges for the **VNet of AKS (for Ledger)** and the **VNet of AKS (for Auditor)** when you create those VNets. + * Allow **connections between Ledger and Auditor** to make ScalarDL (Auditor mode) work properly. + * For more details about these network requirements, refer to [Configure Network Peering for ScalarDL Auditor Mode](./NetworkPeeringForScalarDLAuditor.md). + +{% capture notice--warning %} +**Attention** + +For Byzantine fault detection in ScalarDL to work properly, do not deploy your application pods on the same AKS clusters as the ScalarDL Ledger and ScalarDL Auditor deployments. +{% endcapture %} + +
+<div class="notice--warning">{{ notice--warning | markdownify }}</div>
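+
+As a hedged sketch of the virtual network peering requirement above, a peering from the Ledger VNet to the Auditor VNet could be created with the Azure CLI as follows (the resource group and VNet names are placeholders, and you must also create a second peering in the opposite direction):
+
+```console
+az network vnet peering create \
+  --resource-group <resource group of the Ledger VNet> \
+  --name ledger-to-auditor \
+  --vnet-name <VNet of AKS (for Ledger)> \
+  --remote-vnet <VNet of AKS (for Auditor)> \
+  --allow-vnet-access
+```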
+ +## Recommendations (optional) + +The following are some recommendations for deploying ScalarDL Ledger and ScalarDL Auditor. These recommendations are not required, so you can choose whether or not to apply these recommendations based on your needs. + +### Create at least three worker nodes and three pods per AKS cluster + +To ensure that the AKS cluster has high availability, you should use at least three worker nodes and deploy at least three pods spread across the worker nodes. You can see the [ScalarDL Ledger sample configurations](../conf/scalardl-custom-values.yaml) and [ScalarDL Auditor sample configurations](../conf/scalardl-audit-custom-values.yaml) of `podAntiAffinity` for making three pods spread across the worker nodes. + +{% capture notice--info %} +**Note** + +If you place the worker nodes in different [availability zones](https://learn.microsoft.com/en-us/azure/availability-zones/az-overview) (AZs), you can withstand an AZ failure. +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +### Use 4vCPU / 8GB memory nodes for the worker node in the ScalarDL Ledger and ScalarDL Auditor node pool + +From the perspective of commercial licenses, resources for each pod running ScalarDL Ledger or ScalarDL Auditor are limited to 2vCPU / 4GB memory. In addition to the ScalarDL Ledger and ScalarDL Auditor pods, Kubernetes could deploy some of the following components to each worker node: + +* AKS cluster for ScalarDL Ledger + * ScalarDL Ledger pod (2vCPU / 4GB) + * Envoy proxy + * Monitoring components (if you deploy monitoring components such as `kube-prometheus-stack`) + * Kubernetes components +* AKS cluster for ScalarDL Auditor + * ScalarDL Auditor pod (2vCPU / 4GB) + * Envoy proxy + * Monitoring components (if you deploy monitoring components such as `kube-prometheus-stack`) + * Kubernetes components + +With this in mind, you should use a worker node that has at least 4vCPU / 8GB memory resources and use at least three worker nodes for availability, as mentioned in [Create at least three worker nodes and three pods](#create-at-least-three-worker-nodes-and-three-pods-per-aks-cluster). And remember, for Byzantine fault detection to work properly, you cannot deploy your application pods on the same AKS clusters as the ScalarDL Ledger and ScalarDL Auditor deployments. + +However, three nodes with at least 4vCPU / 8GB memory resources per node is the minimum environment for production. You should also consider the resources of the AKS cluster (for example, the number of worker nodes, vCPUs per node, memory per node, ScalarDL Ledger pods, and ScalarDL Auditor pods), which depend on your system's workload. In addition, if you plan to scale the pods automatically by using some features like [Horizontal Pod Autoscaling (HPA)](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/), you should consider the maximum number of pods on the worker node when deciding the worker node resources. + +### Create node pools for ScalarDL Ledger and ScalarDL Auditor pods + +AKS creates one system node pool named **agentpool** that is preferred for system pods (used to keep AKS running) by default. We recommend creating additional node pools with **user** mode for ScalarDL Ledger and ScalarDL Auditor pods and deploying ScalarDL Ledger and ScalarDL Auditor pods on those additional node pools. + +### Configure cluster autoscaler in AKS + +If you want to scale ScalarDL Ledger and ScalarDL Auditor pods automatically by using [Horizontal Pod Autoscaler)](https://learn.microsoft.com/en-us/azure/aks/concepts-scale#horizontal-pod-autoscaler), you should configure cluster autoscaler in AKS too. For details, refer to the official Microsoft documentation at [Cluster autoscaler](https://learn.microsoft.com/en-us/azure/aks/concepts-scale#cluster-autoscaler). + +In addition, if you configure cluster autoscaler, you should create a subnet in a VNet for AKS to ensure a sufficient number of IPs exist so that AKS can work without network issues after scaling. The required number of IPs varies depending on the networking plug-in. 
For more details about the number of IPs required, refer to the following: + +* [Use kubenet networking with your own IP address ranges in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/configure-kubenet) +* [Configure Azure CNI networking in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni) + +### Create the AKS cluster on a private network + +You should create the AKS cluster on a private network (private subnet in a VNet) since ScalarDL Ledger and ScalarDL Auditor do not provide any services to users directly via internet access. We recommend accessing ScalarDL Ledger and ScalarDL Auditor via a private network from your applications. + +### Create the AKS cluster by using Azure CNI, if necessary + +The AKS default networking plug-in is [kubenet](https://learn.microsoft.com/en-us/azure/aks/configure-kubenet). If your requirement does not match kubenet, you should use [Azure Container Networking Interface (CNI)](https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni). + +For example, if you want to deploy multiple ScalarDL Ledger and ScalarDL Auditor environments on only one AKS cluster instead of two AKS clusters (e.g., deploy multi-tenant ScalarDL) and control the connection between each tenant by using [Kubernetes NetworkPolicies](https://kubernetes.io/docs/concepts/services-networking/network-policies/), kubenet supports only the Calico Network Policy, which the [Azure support team does not support](https://learn.microsoft.com/en-us/azure/aks/use-network-policies#differences-between-azure-network-policy-manager-and-calico-network-policy-and-their-capabilities). Please note that the Calico Network Policy is supported only by the Calico community or through additional paid support. + +The Azure support and engineering teams, however, do support Azure CNI. So, if you want to use Kubernetes NetworkPolicies and receive support from the Azure support team, you should use Azure CNI. For more details about the differences between kubenet and Azure CNI, refer to the following official Microsoft documentation: + +* [Network concepts for applications in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/concepts-network) +* [Use kubenet networking with your own IP address ranges in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/configure-kubenet) +* [Configure Azure CNI networking in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni) + +### Restrict connections by using some security features based on your requirements + +You should restrict unused connections in ScalarDL and ScalarDL Auditor. To restrict unused connections, you can use some security features of Azure, like [network security groups](https://learn.microsoft.com/en-us/azure/virtual-network/network-security-groups-overview). 
+ +The connections (ports) that ScalarDL Ledger and ScalarDL Auditor use by default are as follows: + +* ScalarDL Ledger + * 50051/TCP (accepts requests from a client and ScalarDL Auditor) + * 50052/TCP (accepts privileged requests from a client and ScalarDL Auditor) + * 50053/TCP (accepts pause/unpause requests from a scalar-admin client tool) + * 8080/TCP (accepts monitoring requests) +* ScalarDL Auditor + * 40051/TCP (accepts requests from a client) + * 40052/TCP (accepts privileged requests from a client) + * 40053/TCP (accepts pause and unpause requests from a scalar-admin client tool) + * 8080/TCP (accepts monitoring requests) +* Scalar Envoy (used with ScalarDL Ledger and ScalarDL Auditor) + * 50051/TCP (load balancing for ScalarDL Ledger) + * 50052/TCP (load balancing for ScalarDL Ledger) + * 40051/TCP (load balancing for ScalarDL Auditor) + * 40052/TCP (load balancing for ScalarDL Auditor) + * 9001/TCP (accepts monitoring requests for Scalar Envoy itself) + +{% capture notice--info %} +**Note** + +- If you change the default listening port for ScalarDL Ledger and ScalarDL Auditor in their configuration files (`ledger.properties` and `auditor.properties`, respectively), you must allow connections by using the port that you configured. +- You must also allow the connections that AKS uses itself. For more details about AKS traffic requirements, refer to [Control egress traffic using Azure Firewall in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/limit-egress-traffic). +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
diff --git a/docs/3.12/scalar-kubernetes/CreateAKSClusterForScalarProducts.md b/docs/3.12/scalar-kubernetes/CreateAKSClusterForScalarProducts.md new file mode 100644 index 00000000..3116f42f --- /dev/null +++ b/docs/3.12/scalar-kubernetes/CreateAKSClusterForScalarProducts.md @@ -0,0 +1,13 @@ +# Guidelines for creating an AKS cluster for Scalar products + +To create an Azure Kubernetes Service (AKS) cluster for Scalar products, refer to the following: + +* [Guidelines for creating an AKS cluster for ScalarDB Server](./CreateAKSClusterForScalarDB.md) +* [Guidelines for creating an AKS cluster for ScalarDL Ledger](./CreateAKSClusterForScalarDL.md) +* [Guidelines for creating an AKS cluster for ScalarDL Ledger and ScalarDL Auditor](./CreateAKSClusterForScalarDLAuditor.md) + +To deploy Scalar products on AKS, refer to the following: + +* [Deploy ScalarDB Server on AKS](./ManualDeploymentGuideScalarDBServerOnAKS.md) +* [Deploy ScalarDL Ledger on AKS](./ManualDeploymentGuideScalarDLOnAKS.md) +* [Deploy ScalarDL Ledger and ScalarDL Auditor on AKS](./ManualDeploymentGuideScalarDLAuditorOnAKS.md) diff --git a/docs/3.12/scalar-kubernetes/CreateBastionServer.md b/docs/3.12/scalar-kubernetes/CreateBastionServer.md new file mode 100644 index 00000000..4da8b944 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/CreateBastionServer.md @@ -0,0 +1,44 @@ +# Create a bastion server + +This document explains how to create a bastion server and install some tools for the deployment of Scalar products. + +## Create a server on the same private network as a Kubernetes cluster + +It is recommended to create a Kubernetes cluster for Scalar products on a private network. If you create a Kubernetes cluster on a private network, you should create a bastion server on the same private network to access your Kubernetes cluster. + +## Install tools + +Please install the following tools on the bastion server according to their official documents. + +* [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) +* [helm](https://helm.sh/docs/intro/install/) + +## Configure kubeconfig + +After you install the kubectl command, you must configure a **kubeconfig** to access your Kubernetes cluster. Please refer to the following official document for more details on how to configure kubeconfig in each managed Kubernetes. + +If you use Amazon EKS (Amazon Elastic Kubernetes Service), you must install the **AWS CLI** according to the official document [Installing or updating the latest version of the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html). After that, you can see how to configure kubeconfig in [Creating or updating a kubeconfig file for an Amazon EKS cluster](https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html). + +If you use AKS (Azure Kubernetes Service), you must install the **Azure CLI** according to the official document [How to install the Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli). After that, you can see how to configure kubeconfig in [az aks get-credentials](https://learn.microsoft.com/en-us/cli/azure/aks?view=azure-cli-latest#az-aks-get-credentials). + +## Check installation + +You can check if the tools are installed as follows. + +* kubectl + + ```console + kubectl version --client + ``` + +* helm + + ```console + helm version + ``` + +You can also check if your kubeconfig is properly configured as follows. If you see a URL response, kubectl is correctly configured to access your cluster. 
+ +```console +kubectl cluster-info +``` diff --git a/docs/3.12/scalar-kubernetes/CreateEKSClusterForScalarDB.md b/docs/3.12/scalar-kubernetes/CreateEKSClusterForScalarDB.md new file mode 100644 index 00000000..bd868cdb --- /dev/null +++ b/docs/3.12/scalar-kubernetes/CreateEKSClusterForScalarDB.md @@ -0,0 +1,84 @@ +# (Deprecated) Guidelines for creating an EKS cluster for ScalarDB Server + +{% capture notice--warning %} +**Attention** + +ScalarDB Server is now deprecated. Please use [ScalarDB Cluster](./ManualDeploymentGuideScalarDBClusterOnEKS.md) instead. +{% endcapture %} + +
+<div class="notice--warning">{{ notice--warning | markdownify }}</div>
+ +This document explains the requirements and recommendations for creating an Amazon Elastic Kubernetes Service (EKS) cluster for ScalarDB Server deployment. For details on how to deploy ScalarDB Server on an EKS cluster, see [Deploy ScalarDB Server on Amazon EKS](./ManualDeploymentGuideScalarDBServerOnEKS.md). + +## Before you begin + +You must create an EKS cluster based on the following requirements, recommendations, and your project's requirements. For specific details about how to create an EKS cluster, see the official Amazon documentation at [Creating an Amazon EKS cluster](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html). + +## Requirements + +When deploying ScalarDB Server, you must: + +* Create the EKS cluster by using Kubernetes version 1.21 or higher. +* Configure the EKS cluster based on the version of Kubernetes and your project's requirements. + +## Recommendations (optional) + +The following are some recommendations for deploying ScalarDB Server. These recommendations are not required, so you can choose whether or not to apply these recommendations based on your needs. + +### Create at least three worker nodes and three pods + +To ensure that the EKS cluster has high availability, you should use at least three worker nodes and deploy at least three pods spread across the worker nodes. You can see the [sample configurations](../conf/scalardb-custom-values.yaml) of `podAntiAffinity` for making three pods spread across the worker nodes. + +{% capture notice--info %} +**Note** + +If you place the worker nodes in different [availability zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) (AZs), you can withstand an AZ failure. +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +### Use 4vCPU / 8GB memory nodes for the worker node in the ScalarDB Server node group + +From the perspective of commercial licenses, resources for one pod running ScalarDB Server are limited to 2vCPU / 4GB memory. In addition to the ScalarDB Server pod, Kubernetes could deploy some of the following components to each worker node: + +* ScalarDB Server pod (2vCPU / 4GB) +* Envoy proxy +* Your application pods (if you choose to run your application's pods on the same worker node) +* Monitoring components (if you deploy monitoring components such as `kube-prometheus-stack`) +* Kubernetes components + +With this in mind, you should use a worker node that has at least 4vCPU / 8GB memory resources and use at least three worker nodes for availability, as mentioned in [Create at least three worker nodes and three pods](#create-at-least-three-worker-nodes-and-three-pods). + +However, three nodes with at least 4vCPU / 8GB memory resources per node is the minimum for production environment. You should also consider the resources of the EKS cluster (for example, the number of worker nodes, vCPUs per node, memory per node, ScalarDB Server pods, and pods for your application), which depend on your system's workload. In addition, if you plan to scale the pods automatically by using some features like [Horizontal Pod Autoscaling (HPA)](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/), you should consider the maximum number of pods on the worker node when deciding the worker node resources. + +### Configure Cluster Autoscaler in EKS + +If you want to scale ScalarDB Server pods automatically by using [Horizontal Pod Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/horizontal-pod-autoscaler.html), you should configure Cluster Autoscaler in EKS too. For details, see the official Amazon documentation at [Autoscaling](https://docs.aws.amazon.com/eks/latest/userguide/autoscaling.html#cluster-autoscaler). + +In addition, if you configure Cluster Autoscaler, you should create a subnet in an Amazon Virtual Private Cloud (VPC) for EKS with the prefix (e.g., `/24`) to ensure a sufficient number of IPs exist so that EKS can work without network issues after scaling. + +### Create the EKS cluster on a private network + +You should create the EKS cluster on a private network (private subnet in a VPC) since ScalarDB Server does not provide any services to users directly via internet access. We recommend accessing ScalarDB Server via a private network from your applications. + +### Restrict connections by using some security features based on your requirements + +You should restrict unused connections in ScalarDB Server. To restrict unused connections, you can use some security features in AWS, like [security groups](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) and [network access control lists](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html). + +The connections (ports) that ScalarDB Server uses by default are as follows: + +* ScalarDB Server + * 60051/TCP (accepts requests from a client) + * 8080/TCP (accepts monitoring requests) +* Scalar Envoy (used with ScalarDB Server) + * 60051/TCP (load balancing for ScalarDB Server) + * 9001/TCP (accepts monitoring requests for Scalar Envoy itself) + +{% capture notice--info %} +**Note** + +- If you change the default listening port for ScalarDB Server in the configuration file (`database.properties`), you must allow connections by using the port that you configured. 
+- You must also allow the connections that EKS uses itself. For more details about Amazon EKS security group requirements, refer to [Amazon EKS security group requirements and considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html). +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
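+
+As one illustrative way to apply such a restriction with a security group, the following AWS CLI sketch allows the client port listed above only from your application's security group (the group IDs are placeholders; repeat the command for the other ports you need, such as 8080 and 9001):
+
+```console
+aws ec2 authorize-security-group-ingress \
+  --group-id <security group ID of the ScalarDB Server worker nodes> \
+  --protocol tcp \
+  --port 60051 \
+  --source-group <security group ID of your application>
+```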
diff --git a/docs/3.12/scalar-kubernetes/CreateEKSClusterForScalarDBCluster.md b/docs/3.12/scalar-kubernetes/CreateEKSClusterForScalarDBCluster.md new file mode 100644 index 00000000..21904057 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/CreateEKSClusterForScalarDBCluster.md @@ -0,0 +1,85 @@ +# Guidelines for creating an EKS cluster for ScalarDB Cluster + +This document explains the requirements and recommendations for creating an Amazon Elastic Kubernetes Service (EKS) cluster for ScalarDB Cluster deployment. For details on how to deploy ScalarDB Cluster on an EKS cluster, see [Deploy ScalarDB Cluster on Amazon EKS](./ManualDeploymentGuideScalarDBClusterOnEKS.md). + +## Before you begin + +You must create an EKS cluster based on the following requirements, recommendations, and your project's requirements. For specific details about how to create an EKS cluster, see the official Amazon documentation at [Creating an Amazon EKS cluster](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html). + +## Requirements + +When deploying ScalarDB Cluster, you must: + +* Create the EKS cluster by using Kubernetes version 1.21 or higher. +* Configure the EKS cluster based on the version of Kubernetes and your project's requirements. + +## Recommendations (optional) + +The following are some recommendations for deploying ScalarDB Cluster. These recommendations are not required, so you can choose whether or not to apply these recommendations based on your needs. + +### Create at least three worker nodes and three pods + +To ensure that the EKS cluster has high availability, you should use at least three worker nodes and deploy at least three pods spread across the worker nodes. You can see the [sample configurations](../conf/scalardb-cluster-custom-values-indirect-mode.yaml) of `podAntiAffinity` for making three pods spread across the worker nodes. + +{% capture notice--info %} +**Note** + +If you place the worker nodes in different [availability zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) (AZs), you can withstand an AZ failure. +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +### Use 4vCPU / 8GB memory nodes for the worker node in the ScalarDB Cluster node group + +From the perspective of commercial licenses, resources for one pod running ScalarDB Cluster are limited to 2vCPU / 4GB memory. In addition to the ScalarDB Cluster pod, Kubernetes could deploy some of the following components to each worker node: + +* ScalarDB Cluster pod (2vCPU / 4GB) +* Envoy proxy (if you use `indirect` client mode or use a programming language other than Java) +* Your application pods (if you choose to run your application's pods on the same worker node) +* Monitoring components (if you deploy monitoring components such as `kube-prometheus-stack`) +* Kubernetes components + +{% capture notice--info %} +**Note** + +You do not need to deploy an Envoy pod when using `direct-kubernetes` mode. +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +With this in mind, you should use a worker node that has at least 4vCPU / 8GB memory resources and use at least three worker nodes for availability, as mentioned in [Create at least three worker nodes and three pods](#create-at-least-three-worker-nodes-and-three-pods). + +However, three nodes with at least 4vCPU / 8GB memory resources per node is the minimum for production environment. You should also consider the resources of the EKS cluster (for example, the number of worker nodes, vCPUs per node, memory per node, ScalarDB Cluster pods, and pods for your application), which depend on your system's workload. In addition, if you plan to scale the pods automatically by using some features like [Horizontal Pod Autoscaling (HPA)](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/), you should consider the maximum number of pods on the worker node when deciding the worker node resources. + +### Configure Cluster Autoscaler in EKS + +If you want to scale ScalarDB Cluster pods automatically by using [Horizontal Pod Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/horizontal-pod-autoscaler.html), you should configure Cluster Autoscaler in EKS too. For details, see the official Amazon documentation at [Autoscaling](https://docs.aws.amazon.com/eks/latest/userguide/autoscaling.html#cluster-autoscaler). + +In addition, if you configure Cluster Autoscaler, you should create a subnet in an Amazon Virtual Private Cloud (VPC) for EKS with the prefix (e.g., `/24`) to ensure a sufficient number of IPs exist so that EKS can work without network issues after scaling. + +### Create the EKS cluster on a private network + +You should create the EKS cluster on a private network (private subnet in a VPC) since ScalarDB Cluster does not provide any services to users directly via internet access. We recommend accessing ScalarDB Cluster via a private network from your applications. + +### Restrict connections by using some security features based on your requirements + +You should restrict unused connections in ScalarDB Cluster. To restrict unused connections, you can use some security features in AWS, like [security groups](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) and [network access control lists](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html). + +The connections (ports) that ScalarDB Cluster uses by default are as follows: + +* ScalarDB Cluster + * 60053/TCP (accepts gRPC or SQL requests from a client) + * 8080/TCP (accepts GraphQL requests from a client) + * 9080/TCP (accepts monitoring requests) +* Scalar Envoy (used with ScalarDB Cluster `indirect` mode) + * 60053/TCP (load balancing for ScalarDB Cluster) + * 9001/TCP (accepts monitoring requests for Scalar Envoy itself) + +{% capture notice--info %} +**Note** + +- If you change the default listening port for ScalarDB Cluster in the configuration file (`scalardb-cluster-node.properties`), you must allow connections by using the port that you configured. +- You must also allow the connections that EKS uses itself. For more details about Amazon EKS security group requirements, refer to [Amazon EKS security group requirements and considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html). +{% endcapture %} + +
{{ notice--info | markdownify }}
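As a concrete starting point that combines several of the recommendations above (at least three worker nodes, 4vCPU / 8GB memory nodes, and a private network), the following is a minimal sketch of an eksctl cluster configuration. The cluster name, region, Kubernetes version, instance type, and node label are assumptions; adjust them to your requirements and workload.

```yaml
# cluster.yaml (illustrative eksctl sketch; names, region, version, and instance type are assumptions)
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
metadata:
  name: scalardb-cluster-eks
  region: ap-northeast-1
  version: "1.28"
managedNodeGroups:
  - name: scalardb-cluster-ng
    instanceType: c5.xlarge   # 4 vCPU / 8GB memory class
    desiredCapacity: 3        # at least three worker nodes for availability
    minSize: 3
    maxSize: 6
    privateNetworking: true   # place worker nodes in private subnets
    labels:
      scalar-labs.com/dedicated-node: scalardb-cluster
```

If you use eksctl, you could create the cluster with `eksctl create cluster -f cluster.yaml`; creating the cluster through the AWS console or other infrastructure-as-code tools works just as well.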
diff --git a/docs/3.12/scalar-kubernetes/CreateEKSClusterForScalarDL.md b/docs/3.12/scalar-kubernetes/CreateEKSClusterForScalarDL.md new file mode 100644 index 00000000..db421788 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/CreateEKSClusterForScalarDL.md @@ -0,0 +1,86 @@ +# Guidelines for creating an EKS cluster for ScalarDL Ledger + +This document explains the requirements and recommendations for creating an Amazon Elastic Kubernetes Service (EKS) cluster for ScalarDL Ledger deployment. For details on how to deploy ScalarDL Ledger on an EKS cluster, see [Deploy ScalarDL Ledger on Amazon EKS](./ManualDeploymentGuideScalarDLOnEKS.md). + +## Before you begin + +You must create an EKS cluster based on the following requirements, recommendations, and your project's requirements. For specific details about how to create an EKS cluster, see the official Amazon documentation at [Creating an Amazon EKS cluster](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html). + +## Requirements + +When deploying ScalarDL Ledger, you must: + +* Create the EKS cluster by using Kubernetes version 1.21 or higher. +* Configure the EKS cluster based on the version of Kubernetes and your project's requirements. + +{% capture notice--warning %} +**Attention** + +For Byzantine fault detection in ScalarDL to work properly, do not deploy your application pods on the same EKS cluster as the ScalarDL Ledger deployment. +{% endcapture %} + +
{{ notice--warning | markdownify }}
+ +## Recommendations (optional) + +The following are some recommendations for deploying ScalarDL Ledger. These recommendations are not required, so you can choose whether or not to apply these recommendations based on your needs. + +### Create at least three worker nodes and three pods + +To ensure that the EKS cluster has high availability, you should use at least three worker nodes and deploy at least three pods spread across the worker nodes. You can see the [sample configurations](../conf/scalardl-custom-values.yaml) of `podAntiAffinity` for making three pods spread across the worker nodes. + +{% capture notice--info %} +**Note** + +If you place the worker nodes in different [availability zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) (AZs), you can withstand an AZ failure. +{% endcapture %} + +
{{ notice--info | markdownify }}
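The `podAntiAffinity` settings referenced in the sample configurations above typically look like the following excerpt from a custom values file. This is a minimal sketch only; the top-level key and the pod label are assumptions, so check the linked sample configurations for the exact structure that the chart uses.

```yaml
# Illustrative sketch; the `ledger` key and the pod label are assumptions
ledger:
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchExpressions:
              - key: app.kubernetes.io/name
                operator: In
                values:
                  - scalardl-ledger
          topologyKey: kubernetes.io/hostname   # at most one ScalarDL Ledger pod per worker node
```

With `topologyKey: kubernetes.io/hostname`, the scheduler places at most one ScalarDL Ledger pod per worker node, which is what spreads three pods across three worker nodes.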
+ +### Use 4vCPU / 8GB memory nodes for the worker node in the ScalarDL Ledger node group + +From the perspective of commercial licenses, resources for one pod running ScalarDL Ledger are limited to 2vCPU / 4GB memory. In addition to the ScalarDL Ledger pod, Kubernetes could deploy some of the following components to each worker node: + +* ScalarDL Ledger pod (2vCPU / 4GB) +* Envoy proxy +* Monitoring components (if you deploy monitoring components such as `kube-prometheus-stack`) +* Kubernetes components + +With this in mind, you should use a worker node that has at least 4vCPU / 8GB memory resources and use at least three worker nodes for availability, as mentioned in [Create at least three worker nodes and three pods](#create-at-least-three-worker-nodes-and-three-pods). + +However, three nodes with at least 4vCPU / 8GB memory resources per node is the minimum environment for production. You should also consider the resources of the EKS cluster (for example, the number of worker nodes, vCPUs per node, memory per node, and ScalarDL Ledger pods), which depend on your system's workload. In addition, if you plan to scale the pods automatically by using some features like [Horizontal Pod Autoscaling (HPA)](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/), you should consider the maximum number of pods on the worker node when deciding the worker node resources. + +### Configure Cluster Autoscaler in EKS + +If you want to scale ScalarDL Ledger pods automatically by using [Horizontal Pod Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/horizontal-pod-autoscaler.html), you should configure Cluster Autoscaler in EKS too. For details, see the official Amazon documentation at [Autoscaling](https://docs.aws.amazon.com/eks/latest/userguide/autoscaling.html#cluster-autoscaler). + +In addition, if you configure Cluster Autoscaler, you should create a subnet in an Amazon Virtual Private Cloud (VPC) for EKS with the prefix (e.g., `/24`) to ensure a sufficient number of IPs exist so that EKS can work without network issues after scaling. + +### Create the EKS cluster on a private network + +You should create the EKS cluster on a private network (private subnet in a VPC) since ScalarDL Ledger does not provide any services to users directly via internet access. We recommend accessing ScalarDL Ledger via a private network from your applications. + +### Restrict connections by using some security features based on your requirements + +You should restrict unused connections in ScalarDL Ledger. To restrict unused connections, you can use some security features in AWS, like [security groups](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) and [network access control lists](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html). 
+

The connections (ports) that ScalarDL Ledger uses by default are as follows:

* ScalarDL Ledger
  * 50051/TCP (accepts requests from a client)
  * 50052/TCP (accepts privileged requests from a client)
  * 50053/TCP (accepts pause and unpause requests from a scalar-admin client tool)
  * 8080/TCP (accepts monitoring requests)
* Scalar Envoy (used with ScalarDL Ledger)
  * 50051/TCP (load balancing for ScalarDL Ledger)
  * 50052/TCP (load balancing for ScalarDL Ledger)
  * 9001/TCP (accepts monitoring requests for Scalar Envoy itself)

{% capture notice--info %}
**Note**

- If you change the default listening port for ScalarDL Ledger in the configuration file (`ledger.properties`), you must allow connections by using the port that you configured.
- You must also allow the connections that EKS uses itself. For more details about Amazon EKS security group requirements, refer to [Amazon EKS security group requirements and considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html).
{% endcapture %}
{{ notice--info | markdownify }}
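As one hedged example of how the port list above could be restricted with a security group, the following CloudFormation sketch allows only the ScalarDL Ledger and Scalar Envoy ports from a private client CIDR. The VPC ID and CIDR are placeholders, and the rules that EKS itself requires (see the note above) are omitted.

```yaml
# Illustrative CloudFormation sketch; the VPC ID and client CIDR are placeholders
Resources:
  ScalarDLLedgerSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: Allow only the ports used by ScalarDL Ledger and Scalar Envoy
      VpcId: vpc-xxxxxxxxxxxxxxxxx
      SecurityGroupIngress:
        - IpProtocol: tcp
          FromPort: 50051
          ToPort: 50053
          CidrIp: 10.0.0.0/16   # private CIDR of your client applications
        - IpProtocol: tcp
          FromPort: 8080
          ToPort: 8080
          CidrIp: 10.0.0.0/16   # monitoring requests for ScalarDL Ledger
        - IpProtocol: tcp
          FromPort: 9001
          ToPort: 9001
          CidrIp: 10.0.0.0/16   # monitoring requests for Scalar Envoy
```

Whether you express these rules in CloudFormation, Terraform, or directly in the AWS console, the goal is the same: open only the ports listed above to the networks that actually need them.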
diff --git a/docs/3.12/scalar-kubernetes/CreateEKSClusterForScalarDLAuditor.md b/docs/3.12/scalar-kubernetes/CreateEKSClusterForScalarDLAuditor.md new file mode 100644 index 00000000..2f6cae05 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/CreateEKSClusterForScalarDLAuditor.md @@ -0,0 +1,105 @@ +# Guidelines for creating an EKS cluster for ScalarDL Ledger and ScalarDL Auditor + +This document explains the requirements and recommendations for creating an Amazon Elastic Kubernetes Service (EKS) cluster for ScalarDL Ledger and ScalarDL Auditor deployment. For details on how to deploy ScalarDL Ledger and ScalarDL Auditor on an EKS cluster, see [Deploy ScalarDL Ledger and ScalarDL Auditor on Amazon EKS](./ManualDeploymentGuideScalarDLAuditorOnEKS.md). + +## Before you begin + +You must create an EKS cluster based on the following requirements, recommendations, and your project's requirements. For specific details about how to create an EKS cluster, see the official Amazon documentation at [Creating an Amazon EKS cluster](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html). + +## Requirements + +When deploying ScalarDL Ledger and ScalarDL Auditor, you must: + +* Create two EKS clusters by using Kubernetes version 1.21 or higher. + * One EKS cluster for ScalarDL Ledger + * One EKS cluster for ScalarDL Auditor +* Configure the EKS clusters based on the version of Kubernetes and your project's requirements. +* Configure an Amazon Virtual Private Cloud (VPC) as follows. + * Connect the **VPC of EKS (for Ledger)** and the **VPC of EKS (for Auditor)** by using [VPC peering](https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html). To do so, you must specify the different IP ranges for the **VPC of EKS (for Ledger)** and the **VPC of EKS (for Auditor)** when you create those VPCs. + * Allow **connections between Ledger and Auditor** to make ScalarDL (Auditor mode) work properly. + * For more details about these network requirements, refer to [Configure Network Peering for ScalarDL Auditor Mode](./NetworkPeeringForScalarDLAuditor.md). + +{% capture notice--warning %} +**Attention** + +For Byzantine fault detection in ScalarDL to work properly, do not deploy your application pods on the same EKS clusters as the ScalarDL Ledger and ScalarDL Auditor deployments. +{% endcapture %} + +
{{ notice--warning | markdownify }}
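To illustrate the requirement above about specifying different IP ranges for the two VPCs, the following sketch shows two separate eksctl configuration files with non-overlapping CIDRs. The cluster names, region, and CIDR blocks are placeholders, and eksctl is only one of several ways to create the clusters.

```yaml
# ledger-cluster.yaml (illustrative; names, region, and CIDRs are placeholders)
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
metadata:
  name: scalardl-ledger-eks
  region: ap-northeast-1
vpc:
  cidr: 10.10.0.0/16   # must not overlap with the Auditor VPC
```

```yaml
# auditor-cluster.yaml (illustrative; names, region, and CIDRs are placeholders)
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
metadata:
  name: scalardl-auditor-eks
  region: ap-northeast-1
vpc:
  cidr: 10.20.0.0/16   # must not overlap with the Ledger VPC
```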
+ +## Recommendations (optional) + +The following are some recommendations for deploying ScalarDL Ledger and ScalarDL Auditor. These recommendations are not required, so you can choose whether or not to apply these recommendations based on your needs. + +### Create at least three worker nodes and three pods per EKS cluster + +To ensure that the EKS cluster has high availability, you should use at least three worker nodes and deploy at least three pods spread across the worker nodes. You can see the [ScalarDL Ledger sample configurations](../conf/scalardl-custom-values.yaml) and [ScalarDL Auditor sample configurations](../conf/scalardl-audit-custom-values.yaml) of `podAntiAffinity` for making three pods spread across the worker nodes. + +{% capture notice--info %} +**Note** + +If you place the worker nodes in different [availability zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) (AZs), you can withstand an AZ failure. +{% endcapture %} + +
{{ notice--info | markdownify }}
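As a hedged sketch of the availability-zone recommendation in the note above, an eksctl node group definition could pin the three worker nodes to three different AZs, as in the following excerpt. The node group name, instance type, and AZ names are placeholders, and the Auditor cluster would be configured the same way.

```yaml
# Illustrative eksctl excerpt; node group name, instance type, and AZ names are placeholders
managedNodeGroups:
  - name: scalardl-ledger-ng
    instanceType: c5.xlarge
    desiredCapacity: 3
    availabilityZones:
      - ap-northeast-1a
      - ap-northeast-1c
      - ap-northeast-1d
```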
+ +### Use 4vCPU / 8GB memory nodes for the worker node in the ScalarDL Ledger and ScalarDL Auditor node group + +From the perspective of commercial licenses, resources for each pod running ScalarDL Ledger or ScalarDL Auditor are limited to 2vCPU / 4GB memory. In addition to the ScalarDL Ledger and ScalarDL Auditor pods, Kubernetes could deploy some of the following components to each worker node: + +* EKS cluster for ScalarDL Ledger + * ScalarDL Ledger pod (2vCPU / 4GB) + * Envoy proxy + * Monitoring components (if you deploy monitoring components such as `kube-prometheus-stack`) + * Kubernetes components +* EKS cluster for ScalarDL Auditor + * ScalarDL Auditor pod (2vCPU / 4GB) + * Envoy proxy + * Monitoring components (if you deploy monitoring components such as `kube-prometheus-stack`) + * Kubernetes components + +With this in mind, you should use a worker node that has at least 4vCPU / 8GB memory resources and use at least three worker nodes for availability, as mentioned in [Create at least three worker nodes and three pods](#create-at-least-three-worker-nodes-and-three-pods-per-eks-cluster). And remember, for Byzantine fault detection to work properly, you cannot deploy your application pods on the same EKS clusters as the ScalarDL Ledger and ScalarDL Auditor deployments. + +However, three nodes with at least 4vCPU / 8GB memory resources per node is a minimum environment for production. You should also consider the resources of the EKS cluster (for example, the number of worker nodes, vCPUs per node, memory per node, ScalarDL Ledger pods, and ScalarDL Auditor pods), which depend on your system's workload. In addition, if you plan to scale the pods automatically by using some features like [Horizontal Pod Autoscaling (HPA)](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/), you should consider the maximum number of pods on the worker node when deciding the worker node resources. + +### Configure Cluster Autoscaler in EKS + +If you want to scale ScalarDL Ledger or ScalarDL Auditor pods automatically by using [Horizontal Pod Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/horizontal-pod-autoscaler.html), you should configure Cluster Autoscaler in EKS too. For details, see the official Amazon documentation at [Autoscaling](https://docs.aws.amazon.com/eks/latest/userguide/autoscaling.html#cluster-autoscaler). + +In addition, if you configure Cluster Autoscaler, you should create a subnet in a VPC for EKS with the prefix (e.g., `/24`) to ensure a sufficient number of IPs exist so that EKS can work without network issues after scaling. + +### Create the EKS cluster on a private network + +You should create the EKS cluster on a private network (private subnet in a VPC) since ScalarDL Ledger and ScalarDL Auditor do not provide any services to users directly via internet access. We recommend accessing ScalarDL Ledger and ScalarDL Auditor via a private network from your applications. + +### Restrict connections by using some security features based on your requirements + +You should restrict unused connections in ScalarDL Ledger and ScalarDL Auditor. To restrict unused connections, you can use some security features in AWS, like [security groups](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) and [network access control lists](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html). 
+ +The connections (ports) that ScalarDL Ledger and ScalarDL Auditor use by default are as follows: + +* ScalarDL Ledger + * 50051/TCP (accepts requests from a client and ScalarDL Auditor) + * 50052/TCP (accepts privileged requests from a client and ScalarDL Auditor) + * 50053/TCP (accepts pause and unpause requests from a scalar-admin client tool) + * 8080/TCP (accepts monitoring requests) +* ScalarDL Auditor + * 40051/TCP (accepts requests from a client) + * 40052/TCP (accepts privileged requests from a client) + * 40053/TCP (accepts pause and unpause requests from a scalar-admin client tool) + * 8080/TCP (accepts monitoring requests) +* Scalar Envoy (used with ScalarDL Ledger and ScalarDL Auditor) + * 50051/TCP (load balancing for ScalarDL Ledger) + * 50052/TCP (load balancing for ScalarDL Ledger) + * 40051/TCP (load balancing for ScalarDL Auditor) + * 40052/TCP (load balancing for ScalarDL Auditor) + * 9001/TCP (accepts monitoring requests for Scalar Envoy itself) + +{% capture notice--info %} +**Note** + +- If you change the default listening port for ScalarDL Ledger and ScalarDL Auditor in their configuration files (`ledger.properties` and `auditor.properties`, respectively), you must allow the connections by using the port that you configured. +- You must also allow the connections that EKS uses itself. For more details about Amazon EKS security group requirements, refer to [Amazon EKS security group requirements and considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html). +{% endcapture %} + +
{{ notice--info | markdownify }}
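If you plan to use the Horizontal Pod Autoscaling mentioned in the Cluster Autoscaler section above, a minimal manifest looks like the following sketch. The Deployment name, namespace, and thresholds are assumptions for illustration; the `maxReplicas` value is what you should size the worker nodes against.

```yaml
# Illustrative HPA sketch; the Deployment name, namespace, and thresholds are assumptions
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: scalardl-ledger
  namespace: scalardl-ledger
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: scalardl-ledger
  minReplicas: 3
  maxReplicas: 6   # size your worker nodes for this maximum number of pods
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70
```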
diff --git a/docs/3.12/scalar-kubernetes/CreateEKSClusterForScalarProducts.md b/docs/3.12/scalar-kubernetes/CreateEKSClusterForScalarProducts.md new file mode 100644 index 00000000..0560f383 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/CreateEKSClusterForScalarProducts.md @@ -0,0 +1,14 @@ +# Guidelines for creating an Amazon EKS cluster for Scalar products + +To create an Amazon Elastic Kubernetes Service (EKS) cluster for Scalar products, refer to the following: + +* [Guidelines for creating an EKS cluster for ScalarDB Cluster](./CreateEKSClusterForScalarDBCluster.md) +* [(Deprecated) Guidelines for creating an EKS cluster for ScalarDB Server](./CreateEKSClusterForScalarDB.md) +* [Guidelines for creating an EKS cluster for ScalarDL Ledger](./CreateEKSClusterForScalarDL.md) +* [Guidelines for creating an EKS cluster for ScalarDL Ledger and ScalarDL Auditor](./CreateEKSClusterForScalarDLAuditor.md) + +To deploy Scalar products on Amazon EKS, refer to the following: + +* [Deploy ScalarDB Server on Amazon EKS (Amazon Elastic Kubernetes Service)](./ManualDeploymentGuideScalarDBServerOnEKS.md) +* [Deploy ScalarDL Ledger on Amazon EKS (Amazon Elastic Kubernetes Service)](./ManualDeploymentGuideScalarDLOnEKS.md) +* [Deploy ScalarDL Ledger and ScalarDL Auditor on Amazon EKS (Amazon Elastic Kubernetes Service)](./ManualDeploymentGuideScalarDLAuditorOnEKS.md) diff --git a/docs/3.12/scalar-kubernetes/K8sLogCollectionGuide.md b/docs/3.12/scalar-kubernetes/K8sLogCollectionGuide.md new file mode 100644 index 00000000..7703967a --- /dev/null +++ b/docs/3.12/scalar-kubernetes/K8sLogCollectionGuide.md @@ -0,0 +1,161 @@ +# Collecting logs from Scalar products on a Kubernetes cluster + +This document explains how to deploy Grafana Loki and Promtail on Kubernetes with Helm. After following this document, you can collect logs of Scalar products on your Kubernetes environment. + +If you use a managed Kubernetes cluster and you want to use the cloud service features for monitoring and logging, please refer to the following document. + +* [Logging and monitoring on Amazon EKS](https://docs.aws.amazon.com/prescriptive-guidance/latest/implementing-logging-monitoring-cloudwatch/amazon-eks-logging-monitoring.html) +* [Monitoring Azure Kubernetes Service (AKS) with Azure Monitor](https://learn.microsoft.com/en-us/azure/aks/monitor-aks) + +## Prerequisites + +* Create a Kubernetes cluster. + * [Create an EKS cluster for Scalar products](./CreateEKSClusterForScalarProducts.md) + * [Create an AKS cluster for Scalar products](./CreateAKSClusterForScalarProducts.md) +* Create a Bastion server and set `kubeconfig`. + * [Create a bastion server](./CreateBastionServer.md) +* Deploy Prometheus Operator (we use Grafana to explore collected logs) + * [Monitoring Scalar products on the Kubernetes cluster](./K8sMonitorGuide.md) + +## Add the grafana helm repository + +This document uses Helm for the deployment of Prometheus Operator. + +```console +helm repo add grafana https://grafana.github.io/helm-charts +``` + +```console +helm repo update +``` + +## Prepare a custom values file + +Please get the sample file [scalar-loki-stack-custom-values.yaml](https://github.com/scalar-labs/scalar-kubernetes/blob/master/conf/scalar-loki-stack-custom-values.yaml) for loki-stack. For the logging of Scalar products, this sample file's configuration is recommended. 
+ +### Set nodeSelector in the custom values file (Recommended in the production environment) + +In the production environment, it is recommended to add labels to the worker node for Scalar products as follows. + +* [EKS - Add a label to the worker node that is used for nodeAffinity](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/CreateEKSClusterForScalarProducts.md#add-a-label-to-the-worker-node-that-is-used-for-nodeaffinity) +* [AKS - Add a label to the worker node that is used for nodeAffinity](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/CreateAKSClusterForScalarProducts.md#add-a-label-to-the-worker-node-that-is-used-for-nodeaffinity) + +Since the promtail pods deployed in this document collect only Scalar product logs, it is sufficient to deploy promtail pods only on the worker node where Scalar products are running. So, you should set nodeSelector in the custom values file (scalar-loki-stack-custom-values.yaml) as follows if you add labels to your Kubernetes worker node. + +* ScalarDB Cluster Example + + ```yaml + promtail: + nodeSelector: + scalar-labs.com/dedicated-node: scalardb-cluster + ``` + +* (Deprecated) ScalarDB Server Example + + ```yaml + promtail: + nodeSelector: + scalar-labs.com/dedicated-node: scalardb + ``` + +* ScalarDL Ledger Example + + ```yaml + promtail: + nodeSelector: + scalar-labs.com/dedicated-node: scalardl-ledger + ``` + +* ScalarDL Auditor Example + + ```yaml + promtail: + nodeSelector: + scalar-labs.com/dedicated-node: scalardl-auditor + ``` + +### Set tolerations in the custom values file (Recommended in the production environment) + +In the production environment, it is recommended to add taints to the worker node for Scalar products as follows. + +* [EKS - Add taint to the worker node that is used for toleration](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/CreateEKSClusterForScalarProducts.md#add-taint-to-the-worker-node-that-is-used-for-toleration) +* [AKS - Add taint to the worker node that is used for toleration](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/CreateAKSClusterForScalarProducts.md#add-taint-to-the-worker-node-that-is-used-for-toleration) + +Since promtail pods are deployed as DaemonSet, you must set tolerations in the custom values file (scalar-loki-stack-custom-values.yaml) as follows if you add taints to your Kubernetes worker node. + +* ScalarDB Cluster Example + + ```yaml + promtail: + tolerations: + - effect: NoSchedule + key: scalar-labs.com/dedicated-node + operator: Equal + value: scalardb-cluster + ``` + +* (Deprecated) ScalarDB Server Example + + ```yaml + promtail: + tolerations: + - effect: NoSchedule + key: scalar-labs.com/dedicated-node + operator: Equal + value: scalardb + ``` + +* ScalarDL Ledger Example + + ```yaml + promtail: + tolerations: + - effect: NoSchedule + key: scalar-labs.com/dedicated-node + operator: Equal + value: scalardl-ledger + ``` + +* ScalarDL Auditor Example + + ```yaml + promtail: + tolerations: + - effect: NoSchedule + key: scalar-labs.com/dedicated-node + operator: Equal + value: scalardl-auditor + ``` + +## Deploy Loki and Promtail + +It is recommended to deploy Loki and Promtail on the same namespace `monitoring` as Prometheus and Grafana. You have already created the `monitoring` namespace in the document [Monitoring Scalar products on the Kubernetes cluster](./K8sMonitorGuide.md). 
+ +```console +helm install scalar-logging-loki grafana/loki-stack -n monitoring -f scalar-loki-stack-custom-values.yaml +``` + +## Check if Loki and Promtail are deployed + +If the Loki and Promtail pods are deployed properly, you can see the `STATUS` is `Running` using the `kubectl get pod -n monitoring` command. Since promtail pods are deployed as DaemonSet, the number of promtail pods depends on the number of Kubernetes nodes. In the following example, there are three worker nodes for Scalar products in the Kubernetes cluster. + +``` +$ kubectl get pod -n monitoring +NAME READY STATUS RESTARTS AGE +scalar-logging-loki-0 1/1 Running 0 35m +scalar-logging-loki-promtail-2fnzn 1/1 Running 0 32m +scalar-logging-loki-promtail-2pwkx 1/1 Running 0 30m +scalar-logging-loki-promtail-gfx44 1/1 Running 0 32m +``` + +## View log in Grafana dashboard + +You can see the collected logs in the Grafana dashboard as follows. + +1. Access the Grafana dashboard +1. Go to the `Explore` page +1. Select `Loki` from the top left pull-down +1. Set conditions to query logs +1. Select the `Run query` button at the top right + +Please refer to the [Monitoring Scalar products on the Kubernetes cluster](./K8sMonitorGuide.md) for more details on how to access the Grafana dashboard. diff --git a/docs/3.12/scalar-kubernetes/K8sMonitorGuide.md b/docs/3.12/scalar-kubernetes/K8sMonitorGuide.md new file mode 100644 index 00000000..97a942e1 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/K8sMonitorGuide.md @@ -0,0 +1,161 @@ +# Monitoring Scalar products on a Kubernetes cluster + +This document explains how to deploy Prometheus Operator on Kubernetes with Helm. After following this document, you can use Prometheus, Alertmanager, and Grafana for monitoring Scalar products on your Kubernetes environment. + +If you use a managed Kubernetes cluster and you want to use the cloud service features for monitoring and logging, please refer to the following document. + +* [Logging and monitoring on Amazon EKS](https://docs.aws.amazon.com/prescriptive-guidance/latest/implementing-logging-monitoring-cloudwatch/amazon-eks-logging-monitoring.html) +* [Monitoring Azure Kubernetes Service (AKS) with Azure Monitor](https://learn.microsoft.com/en-us/azure/aks/monitor-aks) + +## Prerequisites + +* Create a Kubernetes cluster. + * [Create an EKS cluster for Scalar products](./CreateEKSClusterForScalarProducts.md) + * [Create an AKS cluster for Scalar products](./CreateAKSClusterForScalarProducts.md) +* Create a Bastion server and set `kubeconfig`. + * [Create a bastion server](./CreateBastionServer.md) + +## Add the prometheus-community helm repository + +This document uses Helm for the deployment of Prometheus Operator. + +```console +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +``` + +```console +helm repo update +``` + +## Prepare a custom values file + +Please get the sample file [scalar-prometheus-custom-values.yaml](https://github.com/scalar-labs/scalar-kubernetes/blob/master/conf/scalar-prometheus-custom-values.yaml) for kube-prometheus-stack. For the monitoring of Scalar products, this sample file's configuration is recommended. + +In this sample file, the Service resources are not exposed to access from outside of a Kubernetes cluster. If you want to access dashboards from outside of your Kubernetes cluster, you must set `*.service.type` to `LoadBalancer` or `*.ingress.enabled` to `true`. 
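For example, if you want to expose the dashboards through load balancers, the relevant part of the custom values file might look like the following sketch. Verify the exact key names against the kube-prometheus-stack configuration reference linked below.

```yaml
# Illustrative excerpt; verify the key names against the kube-prometheus-stack configuration reference
grafana:
  service:
    type: LoadBalancer
prometheus:
  service:
    type: LoadBalancer
alertmanager:
  service:
    type: LoadBalancer
```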
+ +Please refer to the following official document for more details on the configurations of kube-prometheus-stack. + +* [kube-prometheus-stack - Configuration](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#configuration) + +## Deploy Prometheus Operator + +Scalar products assume the Prometheus Operator is deployed in the `monitoring` namespace by default. So, please create the namespace `monitoring` and deploy Prometheus Operator in the `monitoring` namespace. + +1. Create a namespace `monitoring` on Kubernetes. + + ```console + kubectl create namespace monitoring + ``` + +1. Deploy the kube-prometheus-stack. + + ```console + helm install scalar-monitoring prometheus-community/kube-prometheus-stack -n monitoring -f scalar-prometheus-custom-values.yaml + ``` + +## Check if the Prometheus Operator is deployed + +If the Prometheus Operator (includes Prometheus, Alertmanager, and Grafana) pods are deployed properly, you can see the `STATUS` is `Running` using the `kubectl get pod -n monitoring` command. + +``` +$ kubectl get pod -n monitoring +NAME READY STATUS RESTARTS AGE +alertmanager-scalar-monitoring-kube-pro-alertmanager-0 2/2 Running 0 55s +prometheus-scalar-monitoring-kube-pro-prometheus-0 2/2 Running 0 55s +scalar-monitoring-grafana-cb4f9f86b-jmkpz 3/3 Running 0 62s +scalar-monitoring-kube-pro-operator-865bbb8454-9ppkc 1/1 Running 0 62s +``` + +## Deploy (or Upgrade) Scalar products using Helm Charts + +1. To enable Prometheus monitoring for Scalar products, you must set `true` to the following configurations in the custom values file. + + * Configurations + * `*.prometheusRule.enabled` + * `*.grafanaDashboard.enabled` + * `*.serviceMonitor.enabled` + + Please refer to the following documents for more details on the custom values file of each Scalar product. + + * [ScalarDB Cluster](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-scalardb-cluster.md#prometheus-and-grafana-configurations--recommended-in-production-environments) + * [(Deprecated) ScalarDB Server](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-scalardb.md#prometheusgrafana-configurations--recommended-in-the-production-environment) + * [(Deprecated) ScalarDB GraphQL](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-scalardb-graphql.md#prometheusgrafana-configurations-recommended-in-the-production-environment) + * [ScalarDL Ledger](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-scalardl-ledger.md#prometheusgrafana-configurations-recommended-in-the-production-environment) + * [ScalarDL Auditor](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-scalardl-auditor.md#prometheusgrafana-configurations-recommended-in-the-production-environment) + +1. Deploy (or Upgrade) Scalar products using Helm Charts with the above custom values file. + + Please refer to the following documents for more details on how to deploy/upgrade Scalar products. 
+ + * [ScalarDB Cluster](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalardb-cluster.md) + * [(Deprecated) ScalarDB Server](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalardb.md) + * [(Deprecated) ScalarDB GraphQL](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalardb-graphql.md) + * [ScalarDL Ledger](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalardl-ledger.md) + * [ScalarDL Auditor](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalardl-auditor.md) + +## How to access dashboards + +When you set `*.service.type` to `LoadBalancer` or `*.ingress.enabled` to `true`, you can access dashboards via Service or Ingress of Kubernetes. The concrete implementation and access method depend on the Kubernetes cluster. If you use a managed Kubernetes cluster, please refer to the cloud provider's official document for more details. + +* EKS + * [Network load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html) + * [Application load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) +* AKS + * [Use a public standard load balancer in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/load-balancer-standard) + * [Create an ingress controller in Azure Kubernetes Service (AKS)](https://learn.microsoft.com/en-us/azure/aks/ingress-basic) + +## Access the dashboard from your local machine (For testing purposes only / Not recommended in the production environment) + +You can access each dashboard from your local machine using the `kubectl port-forward` command. + +1. Port forwarding to each service from your local machine. + * Prometheus + + ```console + kubectl port-forward -n monitoring svc/scalar-monitoring-kube-pro-prometheus 9090:9090 + ``` + + * Alertmanager + + ```console + kubectl port-forward -n monitoring svc/scalar-monitoring-kube-pro-alertmanager 9093:9093 + ``` + + * Grafana + + ```console + kubectl port-forward -n monitoring svc/scalar-monitoring-grafana 3000:3000 + ``` + +1. Access each Dashboard. + * Prometheus + + ```console + http://localhost:9090/ + ``` + + * Alertmanager + + ```console + http://localhost:9093/ + ``` + + * Grafana + + ```console + http://localhost:3000/ + ``` + + * Note: + * You can see the user and password of Grafana as follows. + * user + + ```console + kubectl get secrets scalar-monitoring-grafana -n monitoring -o jsonpath='{.data.admin-user}' | base64 -d + ``` + + * password + + ```console + kubectl get secrets scalar-monitoring-grafana -n monitoring -o jsonpath='{.data.admin-password}' | base64 -d + ``` diff --git a/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDBClusterOnEKS.md b/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDBClusterOnEKS.md new file mode 100644 index 00000000..e1f49d90 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDBClusterOnEKS.md @@ -0,0 +1,59 @@ +# Deploy ScalarDB Cluster on Amazon Elastic Kubernetes Service (EKS) + +This guide explains how to deploy ScalarDB Cluster on Amazon Elastic Kubernetes Service (EKS). + +In this guide, you will create one of the following two environments in your AWS environment. 
The environments differ depending on which [client mode](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/developer-guide-for-scalardb-cluster-with-java-api.md#client-modes) you use: + +* **[`direct-kubernetes` client mode](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/developer-guide-for-scalardb-cluster-with-java-api.md#direct-kubernetes-client-mode).** In this mode, you deploy your application in the same EKS cluster as your ScalarDB Cluster deployment. + + ![image](./images/png/EKS_ScalarDB_Cluster_Direct_Kubernetes_Mode.drawio.png) + +* **[`indirect` client mode](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/developer-guide-for-scalardb-cluster-with-java-api.md#indirect-client-mode).** In this mode, you deploy your application in an environment that is different from the EKS cluster that contains your ScalarDB Cluster deployment. + + ![image](./images/png/EKS_ScalarDB_Cluster_Indirect_Mode.drawio.png) + +## Step 1. Subscribe to ScalarDB Cluster in AWS Marketplace + +You must get the ScalarDB Cluster container image by visiting AWS Marketplace and subscribing to [ScalarDB Cluster Standard Edition (Pay-As-You-Go)](https://aws.amazon.com/marketplace/pp/prodview-jx6qxatkxuwm4) or [ScalarDB Cluster Premium Edition (Pay-As-You-Go)](https://aws.amazon.com/marketplace/pp/prodview-djqw3zk6dwyk6). For details on how to subscribe to ScalarDB Cluster in AWS Marketplace, see [Subscribe to Scalar products from AWS Marketplace](./AwsMarketplaceGuide.md#subscribe-to-scalar-products-from-aws-marketplace). + +## Step 2. Create an EKS cluster + +You must create an EKS cluster for the ScalarDB Cluster deployment. For details, see [Guidelines for creating an Amazon EKS cluster for Scalar products](./CreateEKSClusterForScalarProducts.md). + +## Step 3. Set up a database for ScalarDB Cluster + +You must prepare a database before deploying ScalarDB Cluster. To see which types of databases ScalarDB supports, refer to [ScalarDB Supported Databases](https://github.com/scalar-labs/scalardb/blob/master/docs/scalardb-supported-databases.md). + +For details on setting up a database, see [Set up a database for ScalarDB/ScalarDL deployment on AWS](./SetupDatabaseForAWS.md). + +## Step 4. Create a bastion server + +To execute some tools for deploying and managing ScalarDB Cluster on EKS, you must prepare a bastion server in the same Amazon Virtual Private Cloud (VPC) of the EKS cluster that you created in **Step 2**. For details, see [Create a Bastion Server](./CreateBastionServer.md). + +## Step 5. Prepare a custom values file for the Scalar Helm Chart + +To perform tasks, like accessing information in the database that you created in **Step 3**, you must configure a custom values file for the Scalar Helm Chart for ScalarDB Cluster based on your environment. For details, see [Configure a custom values file for Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-file.md). + +**Note:** If you deploy your application in an environment that is different from the EKS cluster that has your ScalarDB Cluster deployment (i.e., you use `indirect` client mode), you must set the `envoy.enabled` parameter to `true` and the `envoy.service.type` parameter to `LoadBalancer` to access Scalar Envoy from your application. + +## Step 6. Deploy ScalarDB Cluster by using the Scalar Helm Chart + +Deploy ScalarDB Cluster on your EKS cluster by using the Helm Chart for ScalarDB Cluster. 
For details, see [Deploy Scalar products using Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalar-products.md). + +**Note:** We recommend creating a dedicated namespace by using the `kubectl create ns scalardb-cluster` command and deploying ScalarDB Cluster in the namespace by using the `-n scalardb-cluster` option with the `helm install` command. + +## Step 7. Check the status of your ScalarDB Cluster deployment + +After deploying ScalarDB Cluster in your EKS cluster, you must check the status of each component. For details, see [Components to Regularly Check When Running in a Kubernetes Environment](./RegularCheck.md). + +## Step 8. Monitor your ScalarDB Cluster deployment + +After deploying ScalarDB Cluster in your EKS cluster, we recommend monitoring the deployed components and collecting their logs, especially in production. For details, see [Monitoring Scalar products on a Kubernetes cluster](./K8sMonitorGuide.md) and [Collecting logs from Scalar products on a Kubernetes cluster](./K8sLogCollectionGuide.md). + +## Step 9. Deploy your application + +If you use [`direct-kubernetes` client mode](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/developer-guide-for-scalardb-cluster-with-java-api.md#direct-kubernetes-client-mode), you must deploy additional Kubernetes resources. For details, see [Deploy your client application on Kubernetes with `direct-kubernetes` mode](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalardb-cluster.md#deploy-your-client-application-on-kubernetes-with-direct-kubernetes-mode). + +## Remove ScalarDB Cluster from EKS + +If you want to remove the environment that you created, please remove all the resources in reverse order from which you created them in. diff --git a/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDBServerOnAKS.md b/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDBServerOnAKS.md new file mode 100644 index 00000000..7ef536c3 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDBServerOnAKS.md @@ -0,0 +1,55 @@ +# Deploy ScalarDB Server on Azure Kubernetes Service (AKS) + +This guide explains how to deploy ScalarDB Server on Azure Kubernetes Service (AKS). + +In this guide, you will create one of the following two environments in your Azure environment. The difference between the two environments is how you plan to deploy the application: + +* Deploy your application in the same AKS cluster as your ScalarDB Server deployment. In this case, you don't need to use the load balancers that Azure provides to access Scalar Envoy from your application. + + ![image](./images/png/AKS_ScalarDB_Server_App_In_Cluster.drawio.png) + +* Deploy your application in an environment that is different from the AKS cluster that contains your ScalarDB Server deployment. In this case, you must use the load balancers that Azure provides to access Scalar Envoy from your application. + + ![image](./images/png/AKS_ScalarDB_Server_App_Out_Cluster.drawio.png) + +## Step 1. Subscribe to ScalarDB Server in Azure Marketplace + +You must get the ScalarDB Server container image by visiting [Azure Marketplace](https://azuremarketplace.microsoft.com/en/marketplace/apps/scalarinc.scalardb) and subscribing to ScalarDB Server. For details on how to subscribe to ScalarDB Server in Azure Marketplace, see [Get Scalar products from Microsoft Azure Marketplace](./AzureMarketplaceGuide.md#get-scalar-products-from-microsoft-azure-marketplace). + +## Step 2. 
Create an AKS cluster + +You must create an AKS cluster for the ScalarDB Server deployment. For details, see [Guidelines for creating an AKS cluster for Scalar products](./CreateAKSClusterForScalarProducts.md). + +## Step 3. Set up a database for ScalarDB Server + +You must prepare a database before deploying ScalarDB Server. To see which types of databases ScalarDB supports, refer to [ScalarDB Supported Databases](https://github.com/scalar-labs/scalardb/blob/master/docs/scalardb-supported-databases.md). + +For details on setting up a database, see [Set up a database for ScalarDB/ScalarDL deployment in Azure](./SetupDatabaseForAzure.md). + +## Step 4. Create a bastion server + +To execute some tools for deploying and managing ScalarDB Server on AKS, you must prepare a bastion server in the same Azure Virtual Network (VNet) of the AKS cluster that you created in **Step 2**. For details, see [Create a Bastion Server](./CreateBastionServer.md). + +## Step 5. Prepare a custom values file for the Scalar Helm Chart + +To perform tasks, like accessing information in the database that you created in **Step 3**, you must configure a custom values file for the Scalar Helm Chart for ScalarDB Server based on your environment. For details, see [Configure a custom values file of Scalar Helm Chart](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-file.md). + +**Note:** If you deploy your application in an environment that is different from the AKS cluster that has your ScalarDB Server deployment, you must set the `envoy.service.type` parameter to `LoadBalancer` to access Scalar Envoy from your application. + +## Step 6. Deploy ScalarDB Server by using the Scalar Helm Chart + +Deploy ScalarDB Server on your AKS cluster by using the Helm Chart for ScalarDB Server. For details, see [Deploy Scalar Products using Scalar Helm Chart](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalar-products.md). + +**Note:** We recommend creating a dedicated namespace by using the `kubectl create ns scalardb` command and deploying ScalarDB Server in the namespace by using the `-n scalardb` option with the `helm install` command. + +## Step 7. Check the status of your ScalarDB Server deployment + +After deploying ScalarDB Server in your AKS cluster, you must check the status of each component. For details, see [Components to Regularly Check When Running in a Kubernetes Environment](./RegularCheck.md). + +## Step 8. Monitor your ScalarDB Server deployment + +After deploying ScalarDB Server in your AKS cluster, we recommend monitoring the deployed components and collecting their logs, especially in production. For details, see [Monitoring Scalar products on a Kubernetes cluster](./K8sMonitorGuide.md) and [Collecting logs from Scalar products on a Kubernetes cluster](./K8sLogCollectionGuide.md). + +## Remove ScalarDB Server from AKS + +If you want to remove the environment that you created, please remove all the resources in reverse order from which you created them in. diff --git a/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDBServerOnEKS.md b/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDBServerOnEKS.md new file mode 100644 index 00000000..608781c7 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDBServerOnEKS.md @@ -0,0 +1,55 @@ +# Deploy ScalarDB Server on Amazon Elastic Kubernetes Service (EKS) + +This guide explains how to deploy ScalarDB Server on Amazon Elastic Kubernetes Service (EKS). 
+ +In this guide, you will create one of the following two environments in your AWS environment. The difference between the two environments is how you plan to deploy the application: + +* Deploy your application in the same EKS cluster as your ScalarDB Server deployment. In this case, you don't need to use the load balancers that AWS provides to access Scalar Envoy from your application. + + ![image](./images/png/EKS_ScalarDB_Server_App_In_Cluster.drawio.png) + +* Deploy your application in an environment that is different from the EKS cluster that contains your ScalarDB Server deployment. In this case, you must use the load balancers that AWS provides to access Scalar Envoy from your application. + + ![image](./images/png/EKS_ScalarDB_Server_App_Out_Cluster.drawio.png) + +## Step 1. Subscribe to ScalarDB Server in AWS Marketplace + +You must get the ScalarDB Server container image by visiting [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-rzbuhxgvqf4d2) and subscribing to ScalarDB Server. For details on how to subscribe to ScalarDB Server in AWS Marketplace, see [Subscribe to Scalar products from AWS Marketplace](./AwsMarketplaceGuide.md#subscribe-to-scalar-products-from-aws-marketplace). + +## Step 2. Create an EKS cluster + +You must create an EKS cluster for the ScalarDB Server deployment. For details, see [Guidelines for creating an Amazon EKS cluster for Scalar products](./CreateEKSClusterForScalarProducts.md). + +## Step 3. Set up a database for ScalarDB Server + +You must prepare a database before deploying ScalarDB Server. To see which types of databases ScalarDB supports, refer to [ScalarDB Supported Databases](https://github.com/scalar-labs/scalardb/blob/master/docs/scalardb-supported-databases.md). + +For details on setting up a database, see [Set up a database for ScalarDB/ScalarDL deployment on AWS](./SetupDatabaseForAWS.md). + +## Step 4. Create a bastion server + +To execute some tools for deploying and managing ScalarDB Server on EKS, you must prepare a bastion server in the same Amazon Virtual Private Cloud (VPC) of the EKS cluster that you created in **Step 2**. For details, see [Create a Bastion Server](./CreateBastionServer.md). + +## Step 5. Prepare a custom values file for the Scalar Helm Chart + +To perform tasks, like accessing information in the database that you created in **Step 3**, you must configure a custom values file for the Scalar Helm Chart for ScalarDB Server based on your environment. For details, see [Configure a custom values file for Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-file.md). + +**Note:** If you deploy your application in an environment that is different from the EKS cluster that has your ScalarDB Server deployment, you must set the `envoy.service.type` parameter to `LoadBalancer` to access Scalar Envoy from your application. + +## Step 6. Deploy ScalarDB Server by using the Scalar Helm Chart + +Deploy ScalarDB Server on your EKS cluster by using the Helm Chart for ScalarDB Server. For details, see [Deploy Scalar products using Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalar-products.md). + +**Note:** We recommend creating a dedicated namespace by using the `kubectl create ns scalardb` command and deploying ScalarDB Server in the namespace by using the `-n scalardb` option with the `helm install` command. + +## Step 7. 
Check the status of your ScalarDB Server deployment + +After deploying ScalarDB Server in your EKS cluster, you must check the status of each component. For details, see [Components to Regularly Check When Running in a Kubernetes Environment](./RegularCheck.md). + +## Step 8. Monitor your ScalarDB Server deployment + +After deploying ScalarDB Server in your EKS cluster, we recommend monitoring the deployed components and collecting their logs, especially in production. For details, see [Monitoring Scalar products on a Kubernetes cluster](./K8sMonitorGuide.md) and [Collecting logs from Scalar products on a Kubernetes cluster](./K8sLogCollectionGuide.md). + +## Remove ScalarDB Server from EKS + +If you want to remove the environment that you created, please remove all the resources in reverse order from which you created them in. diff --git a/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDLAuditorOnAKS.md b/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDLAuditorOnAKS.md new file mode 100644 index 00000000..1ecbc455 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDLAuditorOnAKS.md @@ -0,0 +1,95 @@ +# Deploy ScalarDL Ledger and ScalarDL Auditor on Azure Kubernetes Service (AKS) + +This guide explains how to deploy ScalarDL Ledger and ScalarDL Auditor on Azure Kubernetes Service (AKS). + +In this guide, you will create one of the following three environments in your Azure environment. To make Byzantine fault detection work properly, we recommend deploying ScalarDL Ledger and ScalarDL Auditor on different administrative domains (i.e., separate environments). + +* Use different Azure accounts (most recommended way) + + ![image](./images/png/AKS_ScalarDL_Auditor_Multi_Account.drawio.png) + +* Use different Azure Virtual Networks (VNets) (second recommended way) + + ![image](./images/png/AKS_ScalarDL_Auditor_Multi_VNet.drawio.png) + +* Use different namespaces (third recommended way) + + ![image](./images/png/AKS_ScalarDL_Auditor_Multi_Namespace.drawio.png) + +**Note:** This guide follows the second recommended way, "Use different VNets." + +## Step 1. Subscribe to ScalarDL Ledger and ScalarDL Auditor in Azure Marketplace + +You must get the ScalarDL Ledger and ScalarDL Auditor container images by visiting [Azure Marketplace](https://azuremarketplace.microsoft.com/en/marketplace/apps/scalarinc.scalardl) and subscribing to ScalarDL Ledger and ScalarDL Auditor. For details on how to subscribe to ScalarDL Ledger and ScalarDL Auditor in Azure Marketplace, see [Get Scalar products from Microsoft Azure Marketplace](./AzureMarketplaceGuide.md#get-scalar-products-from-microsoft-azure-marketplace). + +## Step 2. Create an AKS cluster for ScalarDL Ledger + +You must create an AKS cluster for the ScalarDL Ledger deployment. For details, see [Guidelines for creating an AKS cluster for Scalar products](./CreateAKSClusterForScalarProducts.md). + +## Step 3. Create an AKS cluster for ScalarDL Auditor + +You must also create an AKS cluster for the ScalarDL Auditor deployment. For details, see [Guidelines for creating an AKS cluster for Scalar products](./CreateAKSClusterForScalarProducts.md). + +## Step 4. Set up a database for ScalarDL Ledger + +You must prepare a database before deploying ScalarDL Ledger. Because ScalarDL Ledger uses ScalarDB internally to access databases, refer to [ScalarDB Supported Databases](https://github.com/scalar-labs/scalardb/blob/master/docs/scalardb-supported-databases.md) to see which types of databases ScalarDB supports. 
+ +For details on setting up a database, see [Set up a database for ScalarDB/ScalarDL deployment in Azure](./SetupDatabaseForAzure.md). + +## Step 5. Set up a database for ScalarDL Auditor + +You must also prepare a database before deploying ScalarDL Auditor. Because ScalarDL Auditor uses ScalarDB internally to access databases, refer to [ScalarDB Supported Databases](https://github.com/scalar-labs/scalardb/blob/master/docs/scalardb-supported-databases.md) to see which types of databases ScalarDB supports. + +For details on setting up a database, see [Set up a database for ScalarDB/ScalarDL deployment in Azure](./SetupDatabaseForAzure.md). + +## Step 6. Create a bastion server for ScalarDL Ledger + +To execute some tools for deploying and managing ScalarDL Ledger on AKS, you must prepare a bastion server in the same VNet of the AKS cluster that you created in **Step 2**. For details, see [Create a Bastion Server](./CreateBastionServer.md). + +## Step 7. Create a bastion server for ScalarDL Auditor + +To execute some tools for deploying and managing ScalarDL Auditor on AKS, you must prepare a bastion server in the same VNet of the AKS cluster that you created in **Step 3**. For details, see [Create a Bastion Server](./CreateBastionServer.md). + +## Step 8. Create network peering between two AKS clusters + +To make ScalarDL work properly, ScalarDL Ledger and ScalarDL Auditor need to connect to each other. You must connect two VNets by using [virtual network peering](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-peering-overview). For details, see [Configure Network Peering for ScalarDL Auditor Mode](./NetworkPeeringForScalarDLAuditor.md). + +## Step 9. Prepare custom values files for the Scalar Helm Charts for both ScalarDL Ledger and ScalarDL Schema Loader + +To perform tasks, like accessing information in the database that you created in **Step 4**, you must configure custom values files for the Scalar Helm Charts for both ScalarDL Ledger and ScalarDL Schema Loader (for Ledger) based on your environment. For details, see [Configure a custom values file for Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-file.md). + +## Step 10. Deploy ScalarDL Ledger by using the Scalar Helm Chart + +Deploy ScalarDL Ledger on your AKS cluster by using the Helm Chart for ScalarDL Ledger. For details, see [Deploy Scalar products using Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalar-products.md). + +**Note:** We recommend creating a dedicated namespace by using the `kubectl create ns scalardl-ledger` command and deploying ScalarDL Ledger in the namespace by using the `-n scalardl-ledger` option with the `helm install` command. + +## Step 11. Prepare custom values files for the Scalar Helm Charts for both ScalarDL Auditor and ScalarDL Schema Loader + +To perform tasks, like accessing information in the database that you created in **Step 5**, you must also configure a custom values files for the Scalar Helm Chart for both ScalarDL Auditor and ScalarDL Schema Loader (for Auditor) based on your environment. For details, see [Configure a custom values file for Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-file.md). + +## Step 12. Deploy ScalarDL Auditor by using the Scalar Helm Chart + +Deploy ScalarDL Auditor on your AKS cluster by using the Helm Chart for ScalarDL Auditor. 
For details, see [Deploy Scalar products using Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalar-products.md). + +**Note:** We recommend creating a dedicated namespace by using the `kubectl create ns scalardl-auditor` command and deploying ScalarDL Auditor in the namespace by using the `-n scalardl-auditor` option with the `helm install` command. + +## Step 13. Check the status of your ScalarDL Ledger deployment + +After deploying ScalarDL Ledger in your AKS cluster, you must check the status of each component. For details, see [Components to Regularly Check When Running in a Kubernetes Environment](./RegularCheck.md). + +## Step 14. Check the status of your ScalarDL Auditor deployment + +After deploying ScalarDL Auditor in your AKS cluster, you must check the status of each component. For details, see [Components to Regularly Check When Running in a Kubernetes Environment](./RegularCheck.md). + +## Step 15. Monitor your ScalarDL Ledger deployment + +After deploying ScalarDL Ledger in your AKS cluster, we recommend monitoring the deployed components and collecting their logs, especially in production. For details, see [Monitoring Scalar products on a Kubernetes cluster](./K8sMonitorGuide.md) and [Collecting logs from Scalar products on a Kubernetes cluster](./K8sLogCollectionGuide.md). + +## Step 16. Monitor your ScalarDL Auditor deployment + +After deploying ScalarDL Auditor in your AKS cluster, we recommend monitoring the deployed components and collecting their logs, especially in production. For details, see [Monitoring Scalar products on a Kubernetes cluster](./K8sMonitorGuide.md) and [Collecting logs from Scalar products on a Kubernetes cluster](./K8sLogCollectionGuide.md). + +## Remove ScalarDL Ledger and ScalarDL Auditor from AKS + +If you want to remove the environment that you created, please remove all the resources in reverse order from which you created them in. diff --git a/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDLAuditorOnEKS.md b/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDLAuditorOnEKS.md new file mode 100644 index 00000000..360649bb --- /dev/null +++ b/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDLAuditorOnEKS.md @@ -0,0 +1,95 @@ +# Deploy ScalarDL Ledger and ScalarDL Auditor on Amazon Elastic Kubernetes Service (EKS) + +This guide explains how to deploy ScalarDL Ledger and ScalarDL Auditor on Amazon Elastic Kubernetes Service (EKS). + +In this guide, you will create one of the following three environments in your AWS environment. To make Byzantine fault detection work properly, we recommend deploying ScalarDL Ledger and ScalarDL Auditor on different administrative domains (i.e., separate environments). + +* Use different AWS accounts (most recommended way) + + ![image](./images/png/EKS_ScalarDL_Auditor_Multi_Account.drawio.png) + +* Use different Amazon Virtual Private Clouds (VPCs) (second recommended way) + + ![image](./images/png/EKS_ScalarDL_Auditor_Multi_VPC.drawio.png) + +* Use different namespaces (third recommended way) + + ![image](./images/png/EKS_ScalarDL_Auditor_Multi_Namespace.drawio.png) + +**Note:** This guide follows the second recommended way, "Use different VPCs." + +## Step 1. 
Subscribe to ScalarDL Ledger and ScalarDL Auditor in AWS Marketplace + +You must get the ScalarDL Ledger and ScalarDL Auditor container images from [AWS Marketplace](https://aws.amazon.com/marketplace/seller-profile?id=bd4cd7de-49cd-433f-97ba-5cf71d76ec7b) and subscribe to ScalarDL Ledger and ScalarDL Auditor. For details on how to subscribe to ScalarDL Ledger and ScalarDL Auditor in AWS Marketplace, see [Subscribe to Scalar products from AWS Marketplace](./AwsMarketplaceGuide.md#subscribe-to-scalar-products-from-aws-marketplace). + +## Step 2. Create an EKS cluster for ScalarDL Ledger + +You must create an EKS cluster for the ScalarDL Ledger deployment. For details, see [Guidelines for creating an Amazon EKS cluster for Scalar products](./CreateEKSClusterForScalarProducts.md). + +## Step 3. Create an EKS cluster for ScalarDL Auditor + +You must also create an EKS cluster for the ScalarDL Auditor deployment. For details, see [Guidelines for creating an Amazon EKS cluster for Scalar products](./CreateEKSClusterForScalarProducts.md). + +## Step 4. Set up a database for ScalarDL Ledger + +You must prepare a database before deploying ScalarDL Ledger. Because ScalarDL Ledger uses ScalarDB internally to access databases, refer to [ScalarDB Supported Databases](https://github.com/scalar-labs/scalardb/blob/master/docs/scalardb-supported-databases.md) to see which types of databases ScalarDB supports. + +For details on setting up a database, see [Set up a database for ScalarDB/ScalarDL deployment on AWS](./SetupDatabaseForAWS.md). + +## Step 5. Set up a database for ScalarDL Auditor + +You must also prepare a database before deploying ScalarDL Auditor. Because ScalarDL Auditor uses ScalarDB internally to access databases, refer to [ScalarDB Supported Databases](https://github.com/scalar-labs/scalardb/blob/master/docs/scalardb-supported-databases.md) to see which types of databases ScalarDB supports. + +For details on setting up a database, see [Set up a database for ScalarDB/ScalarDL deployment on AWS](./SetupDatabaseForAWS.md). + +## Step 6. Create a bastion server for ScalarDL Ledger + +To execute some tools for deploying and managing ScalarDL Ledger on EKS, you must prepare a bastion server in the same VPC of the EKS cluster that you created in **Step 2**. For details, see [Create a Bastion Server](./CreateBastionServer.md). + +## Step 7. Create a bastion server for ScalarDL Auditor + +To execute some tools for deploying and managing ScalarDL Auditor on EKS, you must prepare a bastion server in the same VPC of the EKS cluster that you created in **Step 3**. For details, see [Create a Bastion Server](./CreateBastionServer.md). + +## Step 8. Create network peering between two EKS clusters + +To make ScalarDL work properly, ScalarDL Ledger and ScalarDL Auditor need to connect to each other. You must connect two VPCs by using [VPC peering](https://docs.aws.amazon.com/vpc/latest/peering/create-vpc-peering-connection.html). For details, see [Configure network peering for ScalarDL Auditor mode](./NetworkPeeringForScalarDLAuditor.md). + +## Step 9. Prepare custom values files for the Scalar Helm Charts for ScalarDL Ledger and ScalarDL Schema Loader + +To perform tasks, like accessing information in the database that you created in **Step 4**, you must configure custom values files for the Scalar Helm Charts for ScalarDL Ledger and ScalarDL Schema Loader (for Ledger) based on your environment. 
For details, see [Configure a custom values file for Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-file.md). + +## Step 10. Deploy ScalarDL Ledger by using the Scalar Helm Chart + +Deploy ScalarDL Ledger in your EKS cluster by using the Helm Chart for ScalarDL Ledger. For details, see [Deploy Scalar products using Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalar-products.md). + +**Note:** We recommend creating a dedicated namespace by using the `kubectl create ns scalardl-ledger` command and deploying ScalarDL Ledger in the namespace by using the `-n scalardl-ledger` option with the `helm install` command. + +## Step 11. Prepare custom values files for the Scalar Helm Charts for both ScalarDL Auditor and ScalarDL Schema Loader + +To perform tasks, like accessing information in the database that you created in **Step 5**, you must configure custom values files for the Scalar Helm Charts for both ScalarDL Auditor and ScalarDL Schema Loader (for Auditor) based on your environment. For details, see [Configure a custom values file for Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-file.md). + +## Step 12. Deploy ScalarDL Auditor by using the Scalar Helm Chart + +Deploy ScalarDL Auditor in your EKS cluster by using the Helm Chart for ScalarDL Auditor. For details, see [Deploy Scalar products using Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalar-products.md). + +**Note:** We recommend creating a dedicated namespace by using the `kubectl create ns scalardl-auditor` command and deploying ScalarDL Auditor in the namespace by using the `-n scalardl-auditor` option with the `helm install` command. + +## Step 13. Check the status of your ScalarDL Ledger deployment + +After deploying ScalarDL Ledger in your EKS cluster, you must check the status of each component. For details, see [Components to Regularly Check When Running in a Kubernetes Environment](./RegularCheck.md). + +## Step 14. Check the status of your ScalarDL Auditor deployment + +After deploying ScalarDL Auditor in your EKS cluster, you must check the status of each component. For details, see [Components to Regularly Check When Running in a Kubernetes Environment](./RegularCheck.md). + +## Step 15. Monitor your ScalarDL Ledger deployment + +After deploying ScalarDL Ledger in your EKS cluster, we recommend monitoring the deployed components and collecting their logs, especially in production. For details, see [Monitoring Scalar products on a Kubernetes cluster](./K8sMonitorGuide.md) and [Collecting logs from Scalar products on a Kubernetes cluster](./K8sLogCollectionGuide.md). + +## Step 16. Monitor your ScalarDL Auditor deployment + +After deploying ScalarDL Auditor in your EKS cluster, we recommend monitoring the deployed components and collecting their logs, especially in production. For details, see [Monitoring Scalar products on a Kubernetes cluster](./K8sMonitorGuide.md) and [Collecting logs from Scalar products on a Kubernetes cluster](./K8sLogCollectionGuide.md). + +## Remove ScalarDL Ledger and ScalarDL Auditor from EKS + +If you want to remove the environment that you created, please remove all the resources in reverse order from which you created them.
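+
+As a reference, a sketch of the first part of that cleanup (removing the Helm releases and the dedicated namespaces) is shown below. The release names are assumptions; use the names that you passed to the `helm install` command. Cloud resources such as the EKS clusters, databases, bastion servers, and the VPC peering connection must still be removed separately with the AWS tools that you used to create them.
+
+```console
+# Remove the Helm releases in reverse order of creation (ScalarDL Auditor first, then ScalarDL Ledger).
+# The release names below are assumptions; replace them with the names you used with `helm install`.
+helm uninstall scalardl-auditor -n scalardl-auditor
+helm uninstall scalardl-ledger -n scalardl-ledger
+
+# Then remove the dedicated namespaces created in Steps 10 and 12.
+kubectl delete ns scalardl-auditor
+kubectl delete ns scalardl-ledger
+```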
diff --git a/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDLOnAKS.md b/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDLOnAKS.md new file mode 100644 index 00000000..b163782e --- /dev/null +++ b/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDLOnAKS.md @@ -0,0 +1,47 @@ +# Deploy ScalarDL Ledger on Azure Kubernetes Service (AKS) + +This document explains how to deploy **ScalarDL Ledger** on Azure Kubernetes Service (AKS). + +In this guide, you will create the following environment in your Azure environment. + +![image](./images/png/AKS_ScalarDL_Ledger.drawio.png) + +## Step 1. Subscribe to ScalarDL Ledger in Azure Marketplace + +You must get the ScalarDL Ledger container image from [Azure Marketplace](https://azuremarketplace.microsoft.com/en/marketplace/apps/scalarinc.scalardl) and subscribe to ScalarDL. For details on how to subscribe to ScalarDL Ledger in Azure Marketplace, see [Get Scalar products from Microsoft Azure Marketplace](./AzureMarketplaceGuide.md#get-scalar-products-from-microsoft-azure-marketplace). + +## Step 2. Create an AKS cluster + +You must create an AKS cluster for the ScalarDL Ledger deployment. For details, see [Guidelines for creating an AKS cluster for Scalar products](./CreateAKSClusterForScalarProducts.md). + +## Step 3. Set up a database for ScalarDL Ledger + +You must prepare a database before deploying ScalarDL Ledger. Because ScalarDL Ledger uses ScalarDB internally to access databases, refer to [ScalarDB Supported Databases](https://github.com/scalar-labs/scalardb/blob/master/docs/scalardb-supported-databases.md) to see which types of databases ScalarDB supports. + +For details on setting up a database, see [Set up a database for ScalarDB/ScalarDL deployment in Azure](./SetupDatabaseForAzure.md). + +## Step 4. Create a bastion server + +To execute some tools for deploying and managing ScalarDL Ledger on AKS, you must prepare a bastion server in the same Azure Virtual Network (VNet) of the AKS cluster that you created in **Step 2**. For details, see [Create a Bastion Server](./CreateBastionServer.md). + +## Step 5. Prepare custom values files for the Scalar Helm Charts for both ScalarDL Ledger and ScalarDL Schema Loader + +To perform tasks, like accessing information in the database that you created in **Step 3**, you must configure custom values files for the Scalar Helm Charts for both ScalarDL Ledger and ScalarDL Schema Loader (for Ledger) based on your environment. For details, see [Configure a custom values file for Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-file.md). + +## Step 6. Deploy ScalarDL Ledger by using the Scalar Helm Chart + +Deploy ScalarDL Ledger in your AKS cluster by using the Helm Chart for ScalarDL Ledger. For details, see [Deploy Scalar products using Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalar-products.md). + +**Note:** We recommend creating a dedicated namespace by using the `kubectl create ns scalardl-ledger` command and deploying ScalarDL Ledger in the namespace by using the `-n scalardl-ledger` option with the `helm install` command. + +## Step 7. Check the status of your ScalarDL Ledger deployment + +After deploying ScalarDL Ledger in your AKS cluster, you must check the status of each component. For details, see [Components to Regularly Check When Running in a Kubernetes Environment](./RegularCheck.md). + +## Step 8.
Monitor your ScalarDL Ledger deployment + +After deploying ScalarDL Ledger in your AKS cluster, we recommend monitoring the deployed components and collecting their logs, especially in production. For details, see [Monitoring Scalar products on a Kubernetes cluster](./K8sMonitorGuide.md) and [Collecting logs from Scalar products on a Kubernetes cluster](./K8sLogCollectionGuide.md). + +## Remove ScalarDL Ledger from AKS + +If you want to remove the environment that you created, please remove all the resources in reverse order from which you created them. diff --git a/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDLOnEKS.md b/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDLOnEKS.md new file mode 100644 index 00000000..72201a05 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/ManualDeploymentGuideScalarDLOnEKS.md @@ -0,0 +1,47 @@ +# Deploy ScalarDL Ledger on Amazon Elastic Kubernetes Service (EKS) + +This document explains how to deploy **ScalarDL Ledger** on Amazon Elastic Kubernetes Service (EKS). + +In this guide, you will create the following environment in your AWS environment. + +![image](./images/png/EKS_ScalarDL_Ledger.drawio.png) + +## Step 1. Subscribe to ScalarDL Ledger in AWS Marketplace + +You must get the ScalarDL Ledger container image from [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-3jdwfmqonx7a2) and subscribe to ScalarDL. For details on how to subscribe to ScalarDL Ledger in AWS Marketplace, see [Subscribe to Scalar products from AWS Marketplace](./AwsMarketplaceGuide.md#subscribe-to-scalar-products-from-aws-marketplace). + +## Step 2. Create an EKS cluster + +You must create an EKS cluster for the ScalarDL Ledger deployment. For details, see [Guidelines for creating an Amazon EKS cluster for Scalar products](./CreateEKSClusterForScalarProducts.md). + +## Step 3. Set up a database for ScalarDL Ledger + +You must prepare a database before deploying ScalarDL Ledger. Because ScalarDL Ledger uses ScalarDB internally to access databases, refer to [ScalarDB Supported Databases](https://github.com/scalar-labs/scalardb/blob/master/docs/scalardb-supported-databases.md) to see which types of databases ScalarDB supports. + +For details on setting up a database, see [Set up a database for ScalarDB/ScalarDL deployment on AWS](./SetupDatabaseForAWS.md). + +## Step 4. Create a bastion server + +To execute some tools for deploying and managing ScalarDL Ledger on EKS, you must prepare a bastion server in the same Amazon Virtual Private Cloud (VPC) of the EKS cluster that you created in **Step 2**. For details, see [Create a Bastion Server](./CreateBastionServer.md). + +## Step 5. Prepare custom values files for the Scalar Helm Charts for both ScalarDL Ledger and ScalarDL Schema Loader + +To perform tasks, like accessing information in the database that you created in **Step 3**, you must configure custom values files for the Scalar Helm Charts for both ScalarDL Ledger and ScalarDL Schema Loader (for Ledger) based on your environment. For details, see [Configure a custom values file for Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-file.md). + +## Step 6. Deploy ScalarDL Ledger by using the Scalar Helm Chart + +Deploy ScalarDL Ledger in your EKS cluster by using the Helm Chart for ScalarDL Ledger. For details, see [Deploy Scalar products using Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalar-products.md).
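+
+As a rough sketch of what this step can look like, the commands below add the Scalar Helm Charts repository and install the chart for ScalarDL Ledger with the custom values file from Step 5. The repository URL, the release name, and the values file path are assumptions for illustration; follow the guide linked above for the authoritative procedure.
+
+```console
+# The repository URL is an assumption; verify it against the Scalar Helm Charts documentation.
+helm repo add scalar-labs https://scalar-labs.github.io/helm-charts
+helm repo update
+
+# Create a dedicated namespace (see the note below) and install ScalarDL Ledger with your custom values file.
+kubectl create ns scalardl-ledger
+helm install scalardl-ledger scalar-labs/scalardl -n scalardl-ledger -f /path/to/scalardl-ledger-custom-values.yaml
+```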
+ +**Note:** We recommend creating a dedicated namespace by using the `kubectl create ns scalardl-ledger` command and deploying ScalarDL Ledger in the namespace by using the `-n scalardl-ledger` option with the `helm install` command. + +## Step 7. Check the status of your ScalarDL Ledger deployment + +After deploying ScalarDL Ledger in your EKS cluster, you must check the status of each component. For details, see [Components to Regularly Check When Running in a Kubernetes Environment](./RegularCheck.md). + +## Step 8. Monitor your ScalarDL Ledger deployment + +After deploying ScalarDL Ledger in your EKS cluster, we recommend monitoring the deployed components and collecting their logs, especially in production. For details, see [Monitoring Scalar products on a Kubernetes cluster](./K8sMonitorGuide.md) and [Collecting logs from Scalar products on a Kubernetes cluster](./K8sLogCollectionGuide.md). + +## Remove ScalarDL Ledger from EKS + +If you want to remove the environment that you created, please remove all the resources in reverse order from which you created them. diff --git a/docs/3.12/scalar-kubernetes/NetworkPeeringForScalarDLAuditor.md b/docs/3.12/scalar-kubernetes/NetworkPeeringForScalarDLAuditor.md new file mode 100644 index 00000000..452150fd --- /dev/null +++ b/docs/3.12/scalar-kubernetes/NetworkPeeringForScalarDLAuditor.md @@ -0,0 +1,51 @@ +# Configure Network Peering for ScalarDL Auditor Mode + +This document explains how to connect multiple private networks for ScalarDL Auditor mode to perform network peering. For ScalarDL Auditor mode to work properly, you must connect ScalarDL Ledger to ScalarDL Auditor. + +## What networks you must connect + +To make ScalarDL Auditor mode (Byzantine fault detection) work properly, you must connect three private networks. + +* [ScalarDL Ledger network] <-> [ScalarDL Auditor network] +* [ScalarDL Ledger network] <-> [application (client) network] +* [ScalarDL Auditor network] <-> [application (client) network] + +## Network requirements + +### IP address ranges + +To avoid conflicting IP addresses between the private networks, you must have private networks with different IP address ranges. For example: + +* **Private network for ScalarDL Ledger:** 10.1.0.0/16 +* **Private network for ScalarDL Auditor:** 10.2.0.0/16 +* **Private network for application (client):** 10.3.0.0/16 + +### Connections + +The default network ports for connecting ScalarDL Ledger, ScalarDL Auditor, and the application (client) are as follows. You must allow these connections between each private network. + +* **ScalarDL Ledger** + * **50051/TCP:** Accept requests from an application (client) and ScalarDL Auditor via Scalar Envoy. + * **50052/TCP:** Accept privileged requests from an application (client) and ScalarDL Auditor via Scalar Envoy. +* **ScalarDL Auditor** + * **40051/TCP:** Accept requests from an application (client) and ScalarDL Ledger via Scalar Envoy. + * **40052/TCP:** Accept privileged requests from an application (client) and ScalarDL Ledger via Scalar Envoy. +* **Scalar Envoy** (used with ScalarDL Ledger and ScalarDL Auditor) + * **50051/TCP:** Accept requests for ScalarDL Ledger from an application (client) and ScalarDL Auditor. + * **50052/TCP:** Accept privileged requests for ScalarDL Ledger from an application (client) and ScalarDL Auditor. + * **40051/TCP:** Accept requests for ScalarDL Auditor from an application (client) and ScalarDL Ledger.
+ * **40052/TCP:** Accept privileged requests for ScalarDL Auditor from an application (client) and ScalarDL Ledger. + +Note that, if you change the listening port for ScalarDL in the configuration file (ledger.properties or auditor.properties) from the default, you must allow the connections by using the port that you configured. + +## Private-network peering + +For details on how to connect private networks in each cloud, see official documents. + +### Amazon VPC peering + +For details on how to peer virtual private clouds (VPCs) in an Amazon Web Services (AWS) environment, see the official documentation from Amazon at [Create a VPC peering connection](https://docs.aws.amazon.com/vpc/latest/peering/create-vpc-peering-connection.html). + +### Azure VNet peering + +For details on how to peer virtual networks in an Azure environment, see the official documentation from Microsoft at [Virtual network peering](https://learn.microsoft.com/en-us/azure/virtual-network/virtual-network-peering-overview). diff --git a/docs/3.12/scalar-kubernetes/ProductionChecklistForScalarDBCluster.md b/docs/3.12/scalar-kubernetes/ProductionChecklistForScalarDBCluster.md new file mode 100644 index 00000000..38f7434c --- /dev/null +++ b/docs/3.12/scalar-kubernetes/ProductionChecklistForScalarDBCluster.md @@ -0,0 +1,154 @@ +# Production checklist for ScalarDB Cluster + +This checklist provides recommendations when deploying ScalarDB Cluster in a production environment. + +## Before you begin + +In this checklist, we assume that you are deploying ScalarDB Cluster on a managed Kubernetes cluster, which is recommended. + +## Production checklist: ScalarDB Cluster + +The following is a checklist of recommendations when setting up ScalarDB Cluster in a production environment. + +### Number of pods and Kubernetes worker nodes + +To ensure that the Kubernetes cluster has high availability, you should use at least three worker nodes and deploy at least three pods spread across the worker nodes. You can see the [sample configurations](../conf/scalardb-cluster-custom-values-indirect-mode.yaml) of `podAntiAffinity` for making three pods spread across the worker nodes. + +{% capture notice--info %} +**Note** + +If you place the worker nodes in different availability zones (AZs), you can withstand an AZ failure. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +### Worker node specifications + +From the perspective of commercial licenses, resources for one pod running ScalarDB Cluster are limited to 2vCPU / 4GB memory. In addition, some pods other than ScalarDB Cluster pods exist on the worker nodes. + +In other words, the following components could run on one worker node: + +* ScalarDB Cluster pod (2vCPU / 4GB) +* Envoy proxy (if you use `indirect` client mode or use a programming language other than Java) +* Your application pods (if you choose to run your application's pods on the same worker node) +* Monitoring components (if you deploy monitoring components such `kube-prometheus-stack`) +* Kubernetes components + +{% capture notice--info %} +**Note** + +You do not need to deploy an Envoy pod when using `direct-kubernetes` mode. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +With this in mind, you should use a worker node that has at least 4vCPU / 8GB memory resources and use at least three worker nodes for availability, as mentioned in [Number of pods and Kubernetes worker nodes](./ProductionChecklistForScalarDBCluster.md#number-of-pods-and-kubernetes-worker-nodes). + +However, three nodes with at least 4vCPU / 8GB memory resources per node is the minimum for a production environment. You should also consider the resources of the Kubernetes cluster (for example, the number of worker nodes, vCPUs per node, memories per node, ScalarDB Cluster pods, and pods for your application), which depend on your system's workload. In addition, if you plan to scale the pods automatically by using some features like [Horizontal Pod Autoscaling (HPA)](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/), you should consider the maximum number of pods on the worker node to decide on the worker node resources. + +### Network + +You should create the Kubernetes cluster on a private network since ScalarDB Cluster does not provide any services to users directly via internet access. We recommend accessing ScalarDB Cluster via a private network from your applications. + +### Monitoring and logging + +You should monitor the deployed components and collect their logs. For details, see [Monitoring Scalar products on a Kubernetes cluster](./K8sMonitorGuide.md) and [Collecting logs from Scalar products on a Kubernetes cluster](./K8sLogCollectionGuide.md). + +### Backup and restore + +You should enable the automatic backup feature and point-in-time recovery (PITR) feature in the backend database. For details, see [Set up a database for ScalarDB/ScalarDL deployment](./SetupDatabase.md). + +## Production checklist: Client applications that access ScalarDB Cluster + +The following is a checklist of recommendations when setting up a client application that accesses ScalarDB Cluster in a production environment. + +### Client mode (Java client library only) + +When using Java for your application, you can use an official Java client library. In this case, you can choose one of the two client modes: [`direct-kubernetes mode`](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/developer-guide-for-scalardb-cluster-with-java-api.md#direct-kubernetes-client-mode) or [`indirect mode`](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/developer-guide-for-scalardb-cluster-with-java-api.md#indirect-client-mode). + +From the perspective of performance, we recommend using `direct-kubernetes` mode. To use `direct-kubernetes` mode, you must deploy your application pods on the same Kubernetes cluster as ScalarDB Cluster pods. In this case, you don't need to deploy Envoy pods. + +If you can't deploy your Java application pods on the same Kubernetes cluster as ScalarDB Cluster pods for some reason, you must use `indirect` mode. In this case, you must deploy Envoy pods. + +{% capture notice--info %} +**Note** + +The client mode configuration is dedicated to the Java client library. If you use a programming language other than Java for your application (essentially, if you use the [gRPC API](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/scalardb-cluster-grpc-api-guide.md) or [gRPC SQL API](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/scalardb-cluster-sql-grpc-api-guide.md) directly from the programming language), no such configuration exists. In this case, you must deploy Envoy pods. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +### Transaction manager configuration (Java client library only) + +The client application must always access the database through ScalarDB Cluster. To ensure requests are running properly, check the properties file for your client application and confirm that `scalar.db.transaction_manager=cluster` is configured when using the CRUD API. + +#### Recommended for production environments + +```mermaid +flowchart LR + app["App
ScalarDB Cluster Library with gRPC"] + server["ScalarDB Cluster
ScalarDB Library with
Consensus Commit"] + db[(Underlying storage or database)] + app --> server --> db +``` + +#### Not recommended for production environments (for testing purposes only) + +```mermaid +flowchart LR + app["App
ScalarDB Cluster Library with
Consensus Commit"] + db[(Underlying storage or database)] + app --> db +``` + +### SQL connection configuration (Java client library only) + +The client application must always access the database through ScalarDB Cluster. To ensure requests are running properly, check the properties file for your client application and confirm that `scalar.db.sql.connection_mode=cluster` is configured when using the SQL API. + +#### Recommended for production environments + +```mermaid +flowchart LR + app["App
ScalarDB SQL Library (Cluster mode)"] + server["ScalarDB Cluster
ScalarDB Library with
Consensus Commit"] + db[(Underlying storage or database)] + app --> server --> db +``` + +#### Not recommended for production environments (for testing purposes only) + +```mermaid +flowchart LR + app["App
ScalarDB SQL Library (Direct mode)"] + db[(Underlying storage or database)] + app --> db +``` + +### Deployment of the client application when using `direct-kubernetes` client mode (Java client library only) + +If you use [`direct-kubernetes` client mode](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/developer-guide-for-scalardb-cluster-with-java-api.md#direct-kubernetes-client-mode), you must deploy your client application on the same Kubernetes cluster as the ScalarDB Cluster deployment. + +Also, when using `direct-kubernetes` client mode, you must deploy additional Kubernetes resources to make your client application work properly. For details, see [Deploy your client application on Kubernetes with `direct-kubernetes` mode](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalardb-cluster.md#deploy-your-client-application-on-kubernetes-with-direct-kubernetes-mode). + +### Transaction handling (Java client library and gRPC API) + +You must make sure that your application always runs [`commit()`](https://github.com/scalar-labs/scalardb/blob/master/docs/api-guide.md#commit-a-transaction) or [`rollback()`](https://github.com/scalar-labs/scalardb/blob/master/docs/api-guide.md#roll-back-or-abort-a-transaction) after you [`begin()`](https://github.com/scalar-labs/scalardb/blob/master/docs/api-guide.md#begin-or-start-a-transaction) a transaction. If the application does not run `commit()` or `rollback()`, your application might experience unexpected issues or read inconsistent data from the backend database. + +{% capture notice--info %} +**Note** + +If you use the [gRPC API](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/scalardb-cluster-grpc-api-guide.md) or [SQL gRPC API](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/scalardb-cluster-sql-grpc-api-guide.md), your application should call a `Commit` or `Rollback` service after you call a `Begin` service to begin a transaction. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +### Exception handling (Java client library and gRPC API) + +You must make sure that your application handles transaction exceptions. For details, see the document for the API that you are using: + +* [Handle exceptions (Transactional API)](https://github.com/scalar-labs/scalardb/blob/master/docs/api-guide.md#handle-exceptions). +* [Handle exceptions (two-phase commit transactions API)](https://github.com/scalar-labs/scalardb/blob/master/docs/two-phase-commit-transactions.md#handle-exceptions) +* [Execute transactions (ScalarDB SQL API)](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/sql-api-guide.md#execute-transactions) +* [Handle SQLException (ScalarDB JDBC)](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/jdbc-guide.md#handle-sqlexception) +* [Error handling (ScalarDB Cluster gRPC API)](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/scalardb-cluster-grpc-api-guide.md#error-handling-1) +* [Error handling (ScalarDB Cluster SQL gRPC API)](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/scalardb-cluster-sql-grpc-api-guide.md#error-handling-1) diff --git a/docs/3.12/scalar-kubernetes/ProductionChecklistForScalarProducts.md b/docs/3.12/scalar-kubernetes/ProductionChecklistForScalarProducts.md new file mode 100644 index 00000000..8339195d --- /dev/null +++ b/docs/3.12/scalar-kubernetes/ProductionChecklistForScalarProducts.md @@ -0,0 +1,5 @@ +# Production checklist for Scalar products + +To make your deployment ready for production, refer to the following: + +* [Production checklist for ScalarDB Cluster](./ProductionChecklistForScalarDBCluster.md) diff --git a/docs/3.12/scalar-kubernetes/RegularCheck.md b/docs/3.12/scalar-kubernetes/RegularCheck.md new file mode 100644 index 00000000..18536899 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/RegularCheck.md @@ -0,0 +1,73 @@ +# Components to Regularly Check When Running in a Kubernetes Environment + +Most of the components deployed by manual deployment guides are self-healing with the help of the managed Kubernetes services and Kubernetes self-healing capability. There are also configured alerts that occur when some unexpected behavior happens. Thus, there shouldn't be so many things to do day by day for the deployment of Scalar products on the managed Kubernetes cluster. However, it is recommended to check the status of a system on a regular basis to see if everything is working fine. Here is the list of things you might want to do on a regular basis. 
+ +## Kubernetes resources + +### Check if Pods are all healthy statues + +Please check the Kubernetes namespaces: + +* `default` (or specified namespace when you deploy Scalar products) for the Scalar product deployment +* `monitoring` for the Prometheus Operator and Loki + +What to check: + +* `STATUS` is all `Running` +* Pods are evenly distributed on the different nodes + +```console +$ kubectl get pod -o wide -n +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +scalardb-7876f595bd-2jb28 1/1 Running 0 2m35s 10.244.2.6 k8s-worker2 +scalardb-7876f595bd-rfvk6 1/1 Running 0 2m35s 10.244.1.8 k8s-worker +scalardb-7876f595bd-xfkv4 1/1 Running 0 2m35s 10.244.3.8 k8s-worker3 +scalardb-envoy-84c475f77b-cflkn 1/1 Running 0 2m35s 10.244.1.7 k8s-worker +scalardb-envoy-84c475f77b-tzmc9 1/1 Running 0 2m35s 10.244.3.7 k8s-worker3 +scalardb-envoy-84c475f77b-vztqr 1/1 Running 0 2m35s 10.244.2.5 k8s-worker2 +``` + +```console +$ kubectl get pod -n monitoring -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +alertmanager-scalar-monitoring-kube-pro-alertmanager-0 2/2 Running 1 (11m ago) 12m 10.244.2.4 k8s-worker2 +prometheus-scalar-monitoring-kube-pro-prometheus-0 2/2 Running 0 12m 10.244.1.5 k8s-worker +scalar-logging-loki-0 1/1 Running 0 13m 10.244.2.2 k8s-worker2 +scalar-logging-loki-promtail-2c4k9 0/1 Running 0 13m 10.244.0.5 k8s-control-plane +scalar-logging-loki-promtail-8r48b 1/1 Running 0 13m 10.244.3.2 k8s-worker3 +scalar-logging-loki-promtail-b26c6 1/1 Running 0 13m 10.244.2.3 k8s-worker2 +scalar-logging-loki-promtail-sks56 1/1 Running 0 13m 10.244.1.2 k8s-worker +scalar-monitoring-grafana-77c4dbdd85-4mrn7 3/3 Running 0 12m 10.244.3.4 k8s-worker3 +scalar-monitoring-kube-pro-operator-7575dd8bbd-bxhrc 1/1 Running 0 12m 10.244.1.3 k8s-worker +``` + +### Check if Nodes are all healthy statuses + +What to check: + +* `STATUS` is all `Ready` + +```console +$ kubectl get nodes +NAME STATUS ROLES AGE VERSION +k8s-control-plane Ready control-plane 16m v1.25.3 +k8s-worker Ready 15m v1.25.3 +k8s-worker2 Ready 15m v1.25.3 +k8s-worker3 Ready 15m v1.25.3 +``` + +## Prometheus dashboard (Alerts of Scalar products) + +Access to the Prometheus dashboard according to the document [Monitoring Scalar products on the Kubernetes cluster](./K8sMonitorGuide.md). In the **Alerts** tab, you can see the alert status. + +What to check: + +* All alerts are **green (Inactive)** + +If some issue is occurring, it shows you **red (Firing)** status. + +## Grafana dashboard (metrics of Scalar products) + +Access to the Grafana dashboard according to the document [Monitoring Scalar products on the Kubernetes cluster](./K8sMonitorGuide.md). In the **Dashboards** tab, you can see the dashboard of Scalar products. In these dashboards, you can see some metrics of Scalar products. + +Those dashboards cannot address issues directly, but you can see changes from normal (e.g., increasing transaction errors) to get hints for investigating issues. diff --git a/docs/3.12/scalar-kubernetes/RestoreDatabase.md b/docs/3.12/scalar-kubernetes/RestoreDatabase.md new file mode 100644 index 00000000..d84b5aec --- /dev/null +++ b/docs/3.12/scalar-kubernetes/RestoreDatabase.md @@ -0,0 +1,169 @@ +# Restore databases in a Kubernetes environment + +This guide explains how to restore databases that ScalarDB or ScalarDL uses in a Kubernetes environment. Please note that this guide assumes that you are using a managed database from a cloud services provider as the backend database for ScalarDB or ScalarDL. 
+ +## Procedure to restore databases + +1. Scale in ScalarDB or ScalarDL pods to **0** to stop requests to the backend databases. You can scale in the pods to **0** by using the `--set *.replicaCount=0` flag in the helm command. + * ScalarDB Server + + ```console + helm upgrade scalar-labs/scalardb -n -f /path/to/ --set scalardb.replicaCount=0 + ``` + + * ScalarDL Ledger + + ```console + helm upgrade scalar-labs/scalardl -n -f /path/to/ --set ledger.replicaCount=0 + ``` + + * ScalarDL Auditor + + ```console + helm upgrade scalar-labs/scalardl-audit -n -f /path/to/ --set auditor.replicaCount=0 + ``` + +2. Restore the databases by using the point-in-time recovery (PITR) feature. + + For details on how to restore the databases based on your managed database, please refer to the [Supplemental procedures to restore databases based on managed database](./RestoreDatabase.md#supplemental-procedures-to-restore-databases-based-on-managed-database) section in this guide. + + If you are using NoSQL or multiple databases, you should specify the middle point of the pause duration period that you created when following the backup procedure in [Back up a NoSQL database in a Kubernetes environment](./BackupNoSQL.md). +3. Update **database.properties**, **ledger.properties**, or **auditor.properties** based on the newly restored database. + + Because the PITR feature restores databases as another instance, you must update the endpoint information in the custom values file of ScalarDB or ScalarDL to access the newly restored databases. For details on how to configure the custom values file, see [Configure a custom values file for Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-file.md). + + Please note that, if you are using Amazon DynamoDB, your data will be restored with another table name instead of another instance. In other words, the endpoint will not change after restoring the data. Instead, you will need to restore the data by renaming the tables in Amazon DynamoDB. For details on how to restore data with the same table name, please see the [Amazon DynamoDB](./RestoreDatabase.md#amazon-dynamodb) section in this guide. +4. Scale out the ScalarDB or ScalarDL pods to **1** or more to start accepting requests from clients by using the `--set *.replicaCount=N` flag in the helm command. + * ScalarDB Server + + ```console + helm upgrade scalar-labs/scalardb -n -f /path/to/ --set scalardb.replicaCount=3 + ``` + + * ScalarDL Ledger + + ```console + helm upgrade scalar-labs/scalardl -n -f /path/to/ --set ledger.replicaCount=3 + ``` + + * ScalarDL Auditor + + ```console + helm upgrade scalar-labs/scalardl-audit -n -f /path/to/ --set auditor.replicaCount=3 + ``` + +## Supplemental procedures to restore data based on managed database + +### Amazon DynamoDB + +When using the PITR feature, Amazon DynamoDB restores data with another table name. Therefore, you must follow additional steps to restore data with the same table name. + +#### Steps + +1. Create a backup. + 1. Select the middle point of the pause duration period as the restore point. + 2. Use PITR to restore table A to table B. + 3. Perform a backup of the restored table B. Then, confirm the backup is named appropriately for backup B. + 4. Remove table B. 
+ + For details on how to restore DynamoDB tables by using PITR and how to perform a backup of DynamoDB tables manually, see the following official documentation from Amazon: + + * [Restoring a DynamoDB table to a point in time](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/PointInTimeRecovery.Tutorial.html) + * [Backing up a DynamoDB table](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Backup.Tutorial.html) + + You can do this **Create a backup** step as a part of backup operations in the [Back up a NoSQL database in a Kubernetes environment](./BackupNoSQL.md#create-a-period-to-restore-data-and-perform-a-backup). + +2. Restore from the backup. + 1. Remove table A. + 2. Create a table named A by using backup B. + +3. Update the table configuration if necessary, depending on your environment. + + Some configurations, like autoscaling policies, are not set after restoring, so you may need to manually set those configurations depending on your needs. For details, see the official documentation from Amazon at [Backing up and restoring DynamoDB tables with DynamoDB: How it works](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/CreateBackup.html). + + For example, if you are using ScalarDB Schema Loader or ScalarDL Schema Loader to create tables, autoscaling is enabled by default. Therefore, you will need to manually enable autoscaling for the restored tables in DynamoDB. For details on how to enable autoscaling in DynamoDB, see the official documentation from Amazon at [Enabling DynamoDB auto scaling on existing tables](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/AutoScaling.Console.html#AutoScaling.Console.ExistingTable). + + In addition, after restoring the databases, the PITR feature will be disabled and the read/write capacity mode is reset to the default value. If necessary, depending on your environment, you will need to manually set these configurations. For some configurations for restored tables, see [Set up a database for ScalarDB/ScalarDL deployment on AWS (Amazon DynamoDB)](./SetupDatabaseForAWS.md#amazon-dynamodb). + +### Azure Cosmos DB for NoSQL + +When using the PITR feature, Azure Cosmos DB restores data by using another account. Therefore, you must update the endpoint configuration in the custom values file. + +#### Steps + +1. Restore the account. For details on how to restore an Azure Cosmos DB account by using PITR, see [Restore an Azure Cosmos DB account that uses continuous backup mode](https://learn.microsoft.com/en-us/azure/cosmos-db/restore-account-continuous-backup). + +2. Change the **default consistency level** for the restored account from the default value to **Strong**. For details on how to change this value, see the official documentation from Microsoft a [Configure the default consistency level](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/how-to-manage-consistency#configure-the-default-consistency-level). + +3. Update **database.properties** for ScalarDB Schema Loader or ScalarDL Schema Loader based on the newly restored account. + + ScalarDB implements the Cosmos DB adapter by using its stored procedures, which are installed when creating schemas by using ScalarDB Schema Loader or ScalarDL Schema Loader. However, the PITR feature in Cosmos DB does not restore stored procedures, so you will need to reinstall the required stored procedures for all tables after restoration. 
You can reinstall the required stored procedures by using the `--repair-all` option in ScalarDB Schema Loader or ScalarDL Schema Loader. + * **ScalarDB tables:** For details on how to configure **database.properties** for ScalarDB Schema Loader, see [Configure ScalarDB for Cosmos DB for NoSQL](https://github.com/scalar-labs/scalardb/blob/master/docs/getting-started-with-scalardb.md#configure-scalardb-1). + + * **ScalarDL tables:** For details on how to configure the custom values file for ScalarDL Schema Loader, see [Configure a custom values file for ScalarDL Schema Loader](https://github.com/scalar-labs/helm-charts/blob/main/docs/configure-custom-values-scalardl-schema-loader.md). + +4. Re-create the stored procedures by using the `--repair-all` flag in ScalarDB Schema Loader or ScalarDL Schema Loader as follows: + + * ScalarDB tables + + ```console + java -jar scalardb-schema-loader-.jar --config /path/to/ -f /path/to/ [--coordinator] --repair-all + ``` + + * ScalarDL Ledger tables + + ```console + helm install repair-schema-ledger scalar-labs/schema-loading -n -f /path/to/ --set "schemaLoading.commandArgs={--repair-all}" + ``` + + * ScalarDL Auditor tables + + ```console + helm install repair-schema-auditor scalar-labs/schema-loading -n -f /path/to/ --set "schemaLoading.commandArgs={--repair-all}" + ``` + + For more details on repairing tables in ScalarDB Schema Loader, see [Repair tables](https://github.com/scalar-labs/scalardb/blob/master/docs/schema-loader.md#repair-tables). + +5. Update the table configuration if necessary, depending on your environment. For some configurations for restored accounts, see [Set up a database for ScalarDB/ScalarDL deployment on Azure (Azure Cosmos DB for NoSQL)](./SetupDatabaseForAzure.md#azure-cosmos-db-for-nosql). + +### Amazon RDS + +When using the PITR feature, Amazon RDS restores data by using another database instance. Therefore, you must update the endpoint configuration in the custom values file. + +#### Steps + +1. Restore the database instance. For details on how to restore the Amazon RDS instance by using PITR, see the following official documentation from Amazon: + * [Restoring a DB instance to a specified time](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIT.html) + * [Restoring a Multi-AZ DB cluster to a specified time](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIT.MultiAZDBCluster.html) + +2. Update the table configuration if necessary, depending on your environment. For some configurations for the restored database instance, see [Set up a database for ScalarDB/ScalarDL deployment on AWS (Amazon RDS for MySQL, PostgreSQL, Oracle, and SQL Server)](./SetupDatabaseForAWS.md#amazon-rds-for-mysql-postgresql-oracle-and-sql-server). + +### Amazon Aurora + +When using the PITR feature, Amazon Aurora restores data by using another database cluster. Therefore, you must update the endpoint configuration in the custom values file. + +#### Steps + +1. Restore the database cluster. For details on how to restore an Amazon Aurora cluster by using PITR. see the official documentation from Amazon at [Restoring a DB cluster to a specified time](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-pitr.html). + +2. Add a replica (reader) to make the database cluster a Multi-AZ cluster if necessary, depending on your environment. + + The PITR feature in Amazon Aurora cannot restore a database cluster by using a Multi-AZ configuration. 
If you want to restore the database cluster as a Multi-AZ cluster, you must add a reader after restoring the database cluster. For details on how to add a reader, see the official documentation from Amazon at [Adding Aurora Replicas to a DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-replicas-adding.html). + +3. Update the table configuration if necessary, depending on your environment. For some configurations for the restored database cluster, see [Set up a database for ScalarDB/ScalarDL deployment on AWS (Amazon Aurora MySQL and Amazon Aurora PostgreSQL)](./SetupDatabaseForAWS.md#amazon-aurora-mysql-and-amazon-aurora-postgresql). + +### Azure Database for MySQL/PostgreSQL + +When using the PITR feature, Azure Database for MySQL/PostgreSQL restores data by using another server. Therefore, you must update the endpoint configuration in the custom values file. + +#### Steps + +1. Restore the database server. For details on how to restore an Azure Database for MySQL/PostgreSQL server by using PITR, see the following: + + * [Point-in-time restore of a Azure Database for MySQL Flexible Server using Azure portal](https://learn.microsoft.com/en-us/azure/mysql/flexible-server/how-to-restore-server-portal) + * [Backup and restore in Azure Database for PostgreSQL - Flexible Server](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/concepts-backup-restore) + +2. Update the table configuration if necessary, depending on your environment. For some configurations for the restored database server, see the following: + + * [Set up a database for ScalarDB/ScalarDL deployment on Azure (Azure Database for MySQL)](./SetupDatabaseForAzure.md#azure-database-for-mysql) + * [Set up a database for ScalarDB/ScalarDL deployment on Azure (Azure Database for PostgreSQL)](./SetupDatabaseForAzure.md#azure-database-for-postgresql) diff --git a/docs/3.12/scalar-kubernetes/SetupDatabase.md b/docs/3.12/scalar-kubernetes/SetupDatabase.md new file mode 100644 index 00000000..c8b302a1 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/SetupDatabase.md @@ -0,0 +1,6 @@ +# Set up a database for ScalarDB/ScalarDL deployment + +This guide explains how to set up a database for ScalarDB/ScalarDL deployment on cloud services. + +* [Set up a database for ScalarDB/ScalarDL deployment on AWS](./SetupDatabaseForAWS.md) +* [Set up a database for ScalarDB/ScalarDL deployment on Azure](./SetupDatabaseForAzure.md) diff --git a/docs/3.12/scalar-kubernetes/SetupDatabaseForAWS.md b/docs/3.12/scalar-kubernetes/SetupDatabaseForAWS.md new file mode 100644 index 00000000..22faf183 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/SetupDatabaseForAWS.md @@ -0,0 +1,175 @@ +# Set up a database for ScalarDB/ScalarDL deployment on AWS + +This guide explains how to set up a database for ScalarDB/ScalarDL deployment on AWS. + +## Amazon DynamoDB + +### Authentication method + +When you use DynamoDB, you must set `REGION`, `ACCESS_KEY_ID`, and `SECRET_ACCESS_KEY` in the ScalarDB/ScalarDL properties file as follows. + +```properties +scalar.db.contact_points= +scalar.db.username= +scalar.db.password= +scalar.db.storage=dynamo +``` + +Please refer to the following document for more details on the properties for DynamoDB. + +* [Configure ScalarDB for DynamoDB](https://github.com/scalar-labs/scalardb/blob/master/docs/getting-started-with-scalardb.md#configure-scalardb-2) + +### Required configuration/steps + +DynamoDB is available for use in AWS by default. You do not need to set up anything manually to use it. 
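+
+Although DynamoDB itself needs no manual setup, the authentication properties shown above still must be filled in. A filled-in sketch might look like the following; the values are illustrative placeholders only, so replace them with the AWS region and the IAM credentials for your environment.
+
+```properties
+# Illustrative example only. Replace the placeholders with your own AWS region and IAM credentials.
+scalar.db.contact_points=ap-northeast-1
+scalar.db.username=<YOUR_ACCESS_KEY_ID>
+scalar.db.password=<YOUR_SECRET_ACCESS_KEY>
+scalar.db.storage=dynamo
+```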
+ +### Optional configurations/steps + +#### Enable point-in-time recovery (Recommended in the production environment) + +You can enable PITR as a backup/restore method for DynamoDB. If you use [ScalarDB Schema Loader](https://github.com/scalar-labs/scalardb/blob/master/docs/schema-loader.md) for creating schema, it enables the PITR feature for tables by default. Please refer to the official document for more details. + +* [Point-in-time recovery for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/PointInTimeRecovery.html) + +It is recommended since the point-in-time recovery feature automatically and continuously takes backups so that you can reduce downtime (pause duration) for backup operations. Please refer to the following document for more details on how to backup/restore Scalar product data. + +* [Backup restore guide for Scalar products](./BackupRestoreGuide.md) + +#### Configure monitoring (Recommended in the production environment) + +You can configure the monitoring and logging of DynamoDB using its native feature. Please refer to the official document for more details. + +* [Monitoring and logging](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/monitoring.html) + +It is recommended since the metrics and logs help you to investigate some issues in the production environment when they happen. + +#### Use VPC endpoint (Recommended in the production environment) + +// Note that We have not yet tested this feature with Scalar products. +// TODO: We need to test this feature with Scalar products. + +* [Using Amazon VPC endpoints to access DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/vpc-endpoints-dynamodb.html) + +It is recommended since the private internal connections not via WAN can make a system more secure. + +#### Configure Read/Write Capacity (Optional based on your environment) + +You can configure the **Read/Write Capacity** of DynamoDB tables based on your requirements. Please refer to the official document for more details on Read/Write Capacity. + +* [Read/write capacity mode](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html) + +You can configure Read/Write Capacity using ScalarDB/DL Schema Loader when you create a table. Please refer to the following document for more details on how to configure Read/Write Capacity (RU) using ScalarDB/DL Schema Loader. + +* [ScalarDB Schema Loader](https://github.com/scalar-labs/scalardb/blob/master/docs/schema-loader.md) + +## Amazon RDS for MySQL, PostgreSQL, Oracle, and SQL Server + +### Authentication method + +When you use RDS, you must set `JDBC_URL`, `USERNAME`, and `PASSWORD` in the ScalarDB/ScalarDL properties file as follows. + +```properties +scalar.db.contact_points= +scalar.db.username= +scalar.db.password= +scalar.db.storage=jdbc +``` + +Please refer to the following document for more details on the properties for RDS (JDBC databases). + +* [Configure ScalarDB for JDBC databases](https://github.com/scalar-labs/scalardb/blob/master/docs/getting-started-with-scalardb.md#configure-scalardb-3) + +### Required configuration/steps + +#### Create an RDS database instance + +You must create an RDS database instance. Please refer to the official document for more details. 
+ +* [Configuring an Amazon RDS DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_RDS_Configuring.html) + +### Optional configurations/steps + +#### Enable automated backups (Recommended in the production environment) + +You can enable automated backups. Please refer to the official document for more details. + +* [Working with backups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html) + +It is recommended since the automated backups feature enables a point-in-time recovery feature. It can recover data to a specific point in time. It can reduce downtime (pause duration) for backup operations when you use multi databases under Scalar products. Please refer to the following document for more details on how to backup/restore the Scalar product data. + +* [Backup restore guide for Scalar products](./BackupRestoreGuide.md) + +#### Configure monitoring (Recommended in the production environment) + +You can configure the monitoring and logging of RDS using its native feature. Please refer to the official documents for more details. + +* [Monitoring metrics in an Amazon RDS instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Monitoring.html) +* [Monitoring events, logs, and streams in an Amazon RDS DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Monitor_Logs_Events.html) + +It is recommended since the metrics and logs help you to investigate some issues in the production environment when they happen. + +#### Disable public access (Recommended in the production environment) + +Public access is disabled by default. You can access the RDS database instance from the Scalar product pods on your EKS cluster as follows. + +* Create the RDS database instance on the same VPC as your EKS cluster. +* Connect the VPC for the RDS and the VPC for the EKS cluster for the Scalar product deployment using [VPC peering](https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html). (// TODO: We need to test this feature with Scalar products.) + +It is recommended since the private internal connections not via WAN can make a system more secure. + +## Amazon Aurora MySQL and Amazon Aurora PostgreSQL + +### Authentication method + +When you use Amazon Aurora, you must set `JDBC_URL`, `USERNAME`, and `PASSWORD` in the ScalarDB/ScalarDL properties file as follows. + +```properties +scalar.db.contact_points= +scalar.db.username= +scalar.db.password= +scalar.db.storage=jdbc +``` + +Please refer to the following document for more details on the properties for Amazon Aurora (JDBC databases). + +* [Configure ScalarDB for JDBC databases](https://github.com/scalar-labs/scalardb/blob/master/docs/getting-started-with-scalardb.md#configure-scalardb-3) + +### Required configuration/steps + +#### Create an Amazon Aurora DB cluster + +You must create an Amazon Aurora DB cluster. Please refer to the official document for more details. + +* [Configuring your Amazon Aurora DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraSettingUp.html) + +### Optional configurations/steps + +#### Configure backup configurations (Optional based on your environment) + +Amazon Aurora automatically gets a backup by default. You do not need to enable the backup feature manually. + +If you want to change some backup configurations like the backup retention period and backup window, you can configure them. Please refer to the official document for more details. 
+ +* [Backing up and restoring an Amazon Aurora DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/BackupRestoreAurora.html) + +Please refer to the following document for more details on how to backup/restore the Scalar product data. + +* [Backup restore guide for Scalar products](./BackupRestoreGuide.md) + +#### Configure monitoring (Recommended in the production environment) + +You can configure the monitoring and logging of Amazon Aurora using its native feature. Please refer to the official documents for more details. + +* [Monitoring metrics in an Amazon Aurora cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/MonitoringAurora.html) +* [Monitoring events, logs, and streams in an Amazon Aurora DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_Monitor_Logs_Events.html) + +It is recommended since the metrics and logs help you to investigate some issues in the production environment when they happen. + +#### Disable public access (Recommended in the production environment) + +Public access is disabled by default. You can access the Amazon Aurora DB cluster from the Scalar product pods on your EKS cluster as follows. + +* Create the Amazon Aurora DB cluster on the same VPC as your EKS cluster. +* Connect the VPC for the Amazon Aurora DB cluster and the VPC for the EKS cluster for the Scalar product deployment using [VPC peering](https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html). (// TODO: We need to test this feature with Scalar products.) + +It is recommended since the private internal connections not via WAN can make a system more secure. diff --git a/docs/3.12/scalar-kubernetes/SetupDatabaseForAzure.md b/docs/3.12/scalar-kubernetes/SetupDatabaseForAzure.md new file mode 100644 index 00000000..5d0a011b --- /dev/null +++ b/docs/3.12/scalar-kubernetes/SetupDatabaseForAzure.md @@ -0,0 +1,199 @@ +# Set up a database for ScalarDB/ScalarDL deployment on Azure + +This guide explains how to set up a database for ScalarDB/ScalarDL deployment on Azure. + +## Azure Cosmos DB for NoSQL + +### Authentication method + +When you use Cosmos DB for NoSQL, you must set `COSMOS_DB_URI` and `COSMOS_DB_KEY` in the ScalarDB/ScalarDL properties file as follows. + +```properties +scalar.db.contact_points= +scalar.db.password= +scalar.db.storage=cosmos +``` + +Please refer to the following document for more details on the properties for Cosmos DB for NoSQL. + +* [Configure ScalarDB for Cosmos DB for NoSQL](https://github.com/scalar-labs/scalardb/blob/master/docs/getting-started-with-scalardb.md#configure-scalardb-1) + +### Required configuration/steps + +#### Create an Azure Cosmos DB account + +You must create an Azure Cosmos DB account with the NoSQL (core) API. You must set the **Capacity mode** as **Provisioned throughput** when you create it. Please refer to the official document for more details. + +* [Quickstart: Create an Azure Cosmos DB account, database, container, and items from the Azure portal](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/quickstart-portal) + +#### Configure a default consistency configuration + +You must set the **Default consistency level** as **Strong**. Please refer to the official document for more details. 
+ +* [Configure the default consistency level](https://learn.microsoft.com/en-us/azure/cosmos-db/nosql/how-to-manage-consistency#config/ure-the-default-consistency-level) + +### Optional configurations/steps + +#### Configure backup configurations (Recommended in the production environment) + +You can configure **Backup modes** as **Continuous backup mode** for PITR. Please refer to the official document for more details. + +* [Backup modes](https://learn.microsoft.com/en-us/azure/cosmos-db/online-backup-and-restore#backup-modes) + +It is recommended since the continuous backup mode automatically and continuously gets backups so that we can reduce downtime (pause duration) for backup operations. Please refer to the following document for more details on how to backup/restore the Scalar product data. + +* [Backup restore guide for Scalar products](./BackupRestoreGuide.md) + +#### Configure monitoring (Recommended in the production environment) + +You can configure the monitoring of Cosmos DB using its native feature. Please refer to the official document for more details. + +* [Monitor Azure Cosmos DB](https://learn.microsoft.com/en-us/azure/cosmos-db/monitor) + +It is recommended since the metrics and logs help you to investigate some issues in the production environment when they happen. + +#### Enable service endpoint (Recommended in the production environment) + +You can configure the Azure Cosmos DB account to allow access only from a specific subnet of a virtual network (VNet). Please refer to the official document for more details. + +* [Configure access to Azure Cosmos DB from virtual networks (VNet)](https://learn.microsoft.com/en-us/azure/cosmos-db/how-to-configure-vnet-service-endpoint) + +It is recommended since the private internal connections not via WAN can make a system more secure. + +#### Configure the Request Units (Optional based on your environment) + +You can configure the **Request Units** of Cosmos DB based on your requirements. Please refer to the official document for more details on the request units. + +* [Request Units in Azure Cosmos DB](https://learn.microsoft.com/en-us/azure/cosmos-db/request-units) + +You can configure Request Units using ScalarDB/DL Schema Loader when you create a table. Please refer to the following document for more details on how to configure Request Units (RU) using ScalarDB/DL Schema Loader. + +* [ScalarDB Schema Loader](https://github.com/scalar-labs/scalardb/blob/master/docs/schema-loader.md) + +## Azure Database for MySQL + +### Authentication method + +When you use Azure Database for MySQL, you must set `JDBC_URL`, `USERNAME`, and `PASSWORD` in the ScalarDB/ScalarDL properties file as follows. + +```properties +scalar.db.contact_points= +scalar.db.username= +scalar.db.password= +scalar.db.storage=jdbc +``` + +Please refer to the following document for more details on the properties for Azure Database for MySQL (JDBC databases). + +* [Configure ScalarDB for JDBC databases](https://github.com/scalar-labs/scalardb/blob/master/docs/getting-started-with-scalardb.md#configure-scalardb-3) + +### Required configuration/steps + +#### Create a database server + +You must create a database server. Please refer to the official document for more details. + +* [Quickstart: Use the Azure portal to create an Azure Database for MySQL Flexible Server](https://learn.microsoft.com/en-us/azure/mysql/flexible-server/quickstart-create-server-portal) + +You can choose **Single Server** or **Flexible Server** for your deployment. 
However, Flexible Server is recommended in Azure. This document assumes that you use Flexible Server. Please refer to the official documents for more details on the deployment models. + +* [What is Azure Database for MySQL?](https://learn.microsoft.com/en-us/azure/mysql/single-server/overview#deployment-models) + +### Optional configurations/steps + +#### Configure backup configurations (Optional based on your environment) + +Azure Database for MySQL gets a backup by default. You do not need to enable the backup feature manually. + +If you want to change some backup configurations like the backup retention period, you can configure it. Please refer to the official document for more details. + +* [Backup and restore in Azure Database for MySQL Flexible Server](https://learn.microsoft.com/en-us/azure/mysql/flexible-server/concepts-backup-restore) + +Please refer to the following document for more details on how to backup/restore the Scalar product data. + +* [Backup restore guide for Scalar products](./BackupRestoreGuide.md) + +#### Configure monitoring (Recommended in the production environment) + +You can configure the monitoring of Azure Database for MySQL using its native feature. Please refer to the official document for more details. + +* [Monitor Azure Database for MySQL Flexible Server](https://learn.microsoft.com/en-us/azure/mysql/flexible-server/concepts-monitoring) + +It is recommended since the metrics and logs help you to investigate some issues in the production environment when they happen. + +#### Disable public access (Recommended in the production environment) + +You can configure **Private access (VNet Integration)** as a **Connectivity method**. Please refer to the official document for more details. + +* [Connectivity and networking concepts for Azure Database for MySQL - Flexible Server](https://learn.microsoft.com/en-us/azure/mysql/flexible-server/concepts-networking) + +You can access the database server from the Scalar product pods on your AKS cluster as follows. + +* Create the database server on the same VNet as your AKS cluster. +* Connect the VNet for the database server and the VNet for the AKS cluster for the Scalar product deployment using [Virtual network peering](https://learn.microsoft.com/en-us/azure/virtual-network/virtual-network-peering-overview). (// TODO: We need to test this feature with Scalar products.) + +It is recommended since the private internal connections not via WAN can make a system more secure. + +## Azure Database for PostgreSQL + +### Authentication method + +When you use Azure Database for PostgreSQL, you must set `JDBC_URL`, `USERNAME`, and `PASSWORD` in the ScalarDB/ScalarDL properties file as follows. + +```properties +scalar.db.contact_points= +scalar.db.username= +scalar.db.password= +scalar.db.storage=jdbc +``` + +Please refer to the following document for more details on the properties for Azure Database for PostgreSQL (JDBC databases). + +* [Configure ScalarDB for JDBC databases](https://github.com/scalar-labs/scalardb/blob/master/docs/getting-started-with-scalardb.md#configure-scalardb-3) + +### Required configuration/steps + +#### Create a database server + +You must create a database server. Please refer to the official document for more details. + +* [Quickstart: Create an Azure Database for PostgreSQL - Flexible Server in the Azure portal](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/quickstart-create-server-portal) + +You can choose **Single Server** or **Flexible Server** for your deployment. 
However, Flexible Server is recommended in Azure. This document assumes that you use Flexible Server. Please refer to the official documents for more details on the deployment models. + +* [What is Azure Database for PostgreSQL?](https://learn.microsoft.com/en-us/azure/postgresql/single-server/overview#deployment-models) + +### Optional configurations/steps + +#### Configure backup configurations (Optional based on your environment) + +Azure Database for PostgreSQL gets a backup by default. You do not need to enable the backup feature manually. + +If you want to change some backup configurations like the backup retention period, you can configure it. Please refer to the official document for more details. + +* [Backup and restore in Azure Database for PostgreSQL - Flexible Server](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/concepts-backup-restore) + +Please refer to the following document for more details on how to backup/restore the Scalar product data. + +* [Backup restore guide for Scalar products](./BackupRestoreGuide.md) + +#### Configure monitoring (Recommended in the production environment) + +You can configure the monitoring of Azure Database for PostgreSQL using its native feature. Please refer to the official document for more details. + +* [Monitor metrics on Azure Database for PostgreSQL - Flexible Server](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/concepts-monitoring) + +It is recommended since the metrics and logs help you to investigate some issues in the production environment when they happen. + +#### Disable public access (Recommended in the production environment) + +You can configure **Private access (VNet Integration)** as a **Connectivity method**. Please refer to the official document for more details. + +* [Networking overview for Azure Database for PostgreSQL - Flexible Server](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/concepts-networking) + +You can access the database server from the Scalar product pods on your AKS cluster as follows. + +* Create the database server on the same VNet as your AKS cluster. +* Connect the VNet for the database server and the VNet for the AKS cluster for the Scalar product deployment using [Virtual network peering](https://learn.microsoft.com/en-us/azure/virtual-network/virtual-network-peering-overview). (// TODO: We need to test this feature with Scalar products.) + +It is recommended since the private internal connections not via WAN can make a system more secure. diff --git a/docs/3.12/scalar-kubernetes/deploy-kubernetes.md b/docs/3.12/scalar-kubernetes/deploy-kubernetes.md new file mode 100644 index 00000000..3d2e03f8 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/deploy-kubernetes.md @@ -0,0 +1,24 @@ +# Deploying ScalarDB on Managed Kubernetes Services + +The following documentation is to help you set up and deploy a ScalarDB environment on managed Kubernetes services. 
+ +## Database setup guides + +* [Set up a database for ScalarDB/ScalarDL deployment on AWS](SetupDatabaseForAWS.md) +* [Set up a database for ScalarDB/ScalarDL deployment on Azure](SetupDatabaseForAzure.md) + +## Installation guides + +* [Production checklist for ScalarDB Cluster](ProductionChecklistForScalarDBCluster.md) +* [Guidelines for creating an EKS cluster for ScalarDB Cluster](CreateEKSClusterForScalarDBCluster.md) +* [How to install Scalar products through AWS Marketplace](AwsMarketplaceGuide.md) +* [How to install Scalar products through Azure Marketplace](AzureMarketplaceGuide.md) +* [Create a bastion server](CreateBastionServer.md) + +## Deployment guides + +* [Deploy ScalarDB Cluster on Amazon Elastic Kubernetes Service (EKS)](ManualDeploymentGuideScalarDBClusterOnEKS.md) + +## Configuration guides + +* [Make ScalarDB or ScalarDL deployed in a Kubernetes cluster environment available from applications](AccessScalarProducts.md) diff --git a/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDB_Cluster_Direct_Kubernetes_Mode.drawio.png b/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDB_Cluster_Direct_Kubernetes_Mode.drawio.png new file mode 100644 index 00000000..5ceef088 Binary files /dev/null and b/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDB_Cluster_Direct_Kubernetes_Mode.drawio.png differ diff --git a/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDB_Cluster_Indirect_Mode.drawio.png b/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDB_Cluster_Indirect_Mode.drawio.png new file mode 100644 index 00000000..feef0d81 Binary files /dev/null and b/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDB_Cluster_Indirect_Mode.drawio.png differ diff --git a/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDB_Server_App_In_Cluster.drawio.png b/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDB_Server_App_In_Cluster.drawio.png new file mode 100644 index 00000000..c6c9e06e Binary files /dev/null and b/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDB_Server_App_In_Cluster.drawio.png differ diff --git a/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDB_Server_App_Out_Cluster.drawio.png b/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDB_Server_App_Out_Cluster.drawio.png new file mode 100644 index 00000000..028fbe7c Binary files /dev/null and b/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDB_Server_App_Out_Cluster.drawio.png differ diff --git a/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDL_Auditor_Multi_Account.drawio.png b/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDL_Auditor_Multi_Account.drawio.png new file mode 100644 index 00000000..76e1aa16 Binary files /dev/null and b/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDL_Auditor_Multi_Account.drawio.png differ diff --git a/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDL_Auditor_Multi_Namespace.drawio.png b/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDL_Auditor_Multi_Namespace.drawio.png new file mode 100644 index 00000000..026b4a2d Binary files /dev/null and b/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDL_Auditor_Multi_Namespace.drawio.png differ diff --git a/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDL_Auditor_Multi_VNet.drawio.png b/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDL_Auditor_Multi_VNet.drawio.png new file mode 100644 index 00000000..92eba96d Binary files /dev/null and b/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDL_Auditor_Multi_VNet.drawio.png differ diff --git a/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDL_Ledger.drawio.png 
b/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDL_Ledger.drawio.png new file mode 100644 index 00000000..9ee4fd22 Binary files /dev/null and b/docs/3.12/scalar-kubernetes/images/png/AKS_ScalarDL_Ledger.drawio.png differ diff --git a/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDB_Cluster_Direct_Kubernetes_Mode.drawio.png b/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDB_Cluster_Direct_Kubernetes_Mode.drawio.png new file mode 100644 index 00000000..00fef239 Binary files /dev/null and b/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDB_Cluster_Direct_Kubernetes_Mode.drawio.png differ diff --git a/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDB_Cluster_Indirect_Mode.drawio.png b/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDB_Cluster_Indirect_Mode.drawio.png new file mode 100644 index 00000000..db122e17 Binary files /dev/null and b/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDB_Cluster_Indirect_Mode.drawio.png differ diff --git a/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDB_Server_App_In_Cluster.drawio.png b/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDB_Server_App_In_Cluster.drawio.png new file mode 100644 index 00000000..c49fbe4f Binary files /dev/null and b/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDB_Server_App_In_Cluster.drawio.png differ diff --git a/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDB_Server_App_Out_Cluster.drawio.png b/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDB_Server_App_Out_Cluster.drawio.png new file mode 100644 index 00000000..d8dcde16 Binary files /dev/null and b/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDB_Server_App_Out_Cluster.drawio.png differ diff --git a/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDL_Auditor_Multi_Account.drawio.png b/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDL_Auditor_Multi_Account.drawio.png new file mode 100644 index 00000000..1d9e7889 Binary files /dev/null and b/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDL_Auditor_Multi_Account.drawio.png differ diff --git a/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDL_Auditor_Multi_Namespace.drawio.png b/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDL_Auditor_Multi_Namespace.drawio.png new file mode 100644 index 00000000..bea249f3 Binary files /dev/null and b/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDL_Auditor_Multi_Namespace.drawio.png differ diff --git a/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDL_Auditor_Multi_VPC.drawio.png b/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDL_Auditor_Multi_VPC.drawio.png new file mode 100644 index 00000000..30d5af46 Binary files /dev/null and b/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDL_Auditor_Multi_VPC.drawio.png differ diff --git a/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDL_Ledger.drawio.png b/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDL_Ledger.drawio.png new file mode 100644 index 00000000..ce5fd7b5 Binary files /dev/null and b/docs/3.12/scalar-kubernetes/images/png/EKS_ScalarDL_Ledger.drawio.png differ diff --git a/docs/3.12/scalar-kubernetes/manage-kubernetes.md b/docs/3.12/scalar-kubernetes/manage-kubernetes.md new file mode 100644 index 00000000..4400b704 --- /dev/null +++ b/docs/3.12/scalar-kubernetes/manage-kubernetes.md @@ -0,0 +1,16 @@ +# Managing ScalarDB on Managed Kubernetes Services + +The following documentation is to help you manage a ScalarDB environment on managed Kubernetes services. 
+ +## Monitoring guides + +* [Monitoring Scalar products on a Kubernetes cluster](K8sMonitorGuide.md) +* [Collecting logs from Scalar products on a Kubernetes cluster](K8sLogCollectionGuide.md) + +## Backup and restore guide + +* [Back up and restore ScalarDB or ScalarDL data in a Kubernetes environment](BackupRestoreGuide.md) +* [Back up an RDB in a Kubernetes environment](BackupRDB.md) +* [Back up a NoSQL database in a Kubernetes environment](BackupNoSQL.md) +* [Restore databases in a Kubernetes environment](RestoreDatabase.md) +* [Components to Regularly Check When Running in a Kubernetes Environment](RegularCheck.md) diff --git a/docs/3.12/scalardb-analytics-postgresql/getting-started.md b/docs/3.12/scalardb-analytics-postgresql/getting-started.md new file mode 100644 index 00000000..7751eb43 --- /dev/null +++ b/docs/3.12/scalardb-analytics-postgresql/getting-started.md @@ -0,0 +1,92 @@ +# Getting Started with ScalarDB Analytics with PostgreSQL + +This document explains how to get started with ScalarDB Analytics with PostgreSQL. We assume that you have already installed ScalarDB Analytics with PostgreSQL and that all required services are running. If you don't have such an environment, please follow the instructions in [How to Install ScalarDB Analytics with PostgreSQL in Your Local Environment by Using Docker](./installation.md). Because ScalarDB Analytics with PostgreSQL executes queries via PostgreSQL, we also assume that you already have a `psql` client or another PostgreSQL client to send queries to PostgreSQL. + +## What is ScalarDB Analytics with PostgreSQL? + +ScalarDB, as a universal transaction manager, targets mainly transactional workloads and therefore supports limited subsets of relational queries. + +ScalarDB Analytics with PostgreSQL extends the functionality of ScalarDB to process analytical queries on ScalarDB-managed data by using PostgreSQL and its foreign data wrapper (FDW) extension. + +ScalarDB Analytics with PostgreSQL mainly consists of two components: PostgreSQL and Schema Importer. + +PostgreSQL runs as a service, accepting queries from users to process. FDW extensions are used to read data from the back-end storages that ScalarDB manages. Schema Importer is a tool to import the schema of the ScalarDB database into PostgreSQL so that users can see tables on the PostgreSQL side, which are identical to the tables on the ScalarDB side. + +## Set up a ScalarDB database + +First, you need one or more ScalarDB databases to run analytical queries with ScalarDB Analytics with PostgreSQL. If you have your own ScalarDB database, you can skip this section and use your database instead. If you use the [scalardb-samples/scalardb-analytics-postgresql-sample](https://github.com/scalar-labs/scalardb-samples/tree/main/scalardb-analytics-postgresql-sample) project, you can set up a sample database by running the following command in the project directory. + +```shell +$ docker compose run --rm schema-loader \ + -c /etc/scalardb.properties \ + --schema-file /etc/schema.json \ + --coordinator \ + --no-backup \ + --no-scaling +``` + +This command sets up [multiple storage instances](https://scalardb.scalar-labs.com/docs/latest/multi-storage-transactions/) that consist of DynamoDB, PostgreSQL, and Cassandra. 
Then, the command creates the namespaces `dynamons`, `postgresns`, and `cassandrans`, which are mapped to those storages, and creates the tables `dynamons.customer`, `postgresns.orders`, and `cassandrans.lineitem` by using [ScalarDB Schema Loader](https://scalardb.scalar-labs.com/docs/latest/schema-loader/). + +![Multi-storage overview](./images/multi-storage-overview.png) + +You can load sample data into the created tables by running the following command. + +```console +$ docker compose run --rm sample-data-loader +``` + +## Import the schemas from ScalarDB into PostgreSQL + +Next, let's import the schemas of the ScalarDB databases into the PostgreSQL instance that processes analytical queries. ScalarDB Analytics with PostgreSQL provides a tool, Schema Importer, for this purpose. It gets everything in place to run analytical queries for you. + +```shell +$ docker compose run --rm schema-importer \ + import \ + --config /etc/scalardb.properties \ + --host analytics \ + --port 5432 \ + --database test \ + --user postgres \ + --password postgres \ + --namespace cassandrans \ + --namespace postgresns \ + --namespace dynamons \ + --config-on-postgres-host /etc/scalardb.properties +``` + +If you use your own ScalarDB database, you must replace the `--config` and `--config-on-postgres-host` options with your ScalarDB configuration file and the `--namespace` options with your ScalarDB namespaces to import. + +This creates tables (views, to be precise) with the same names as the tables in the ScalarDB databases. In this example, the tables `dynamons.customer`, `postgresns.orders`, and `cassandrans.lineitem` are created. The column definitions are also identical to those in the ScalarDB databases. These tables are [foreign tables](https://www.postgresql.org/docs/current/sql-createforeigntable.html) connected to the underlying storage of the ScalarDB databases using FDW. Therefore, you can equate those tables in PostgreSQL with the tables in the ScalarDB databases. + +![Imported schema](./images/imported-schema.png) + +## Run analytical queries + +Now you have all the tables needed to read the same data that is in the ScalarDB databases, and you can run any analytical queries supported by PostgreSQL. To run queries, connect to PostgreSQL with `psql` or another client. + +```shell +$ psql -U postgres -h localhost test +Password for user postgres: + +> select c_mktsegment, count(*) from dynamons.customer group by c_mktsegment; + c_mktsegment | count +--------------+------- + AUTOMOBILE | 4 + BUILDING | 2 + FURNITURE | 1 + HOUSEHOLD | 2 + MACHINERY | 1 +(5 rows) +``` + +For details about the sample data and additional practical work, see the sample application page. + +## Caveats + +### Isolation level + +ScalarDB Analytics with PostgreSQL reads data with the **Read Committed** isolation level set. This isolation level ensures that the data you read has been committed in the past but does not guarantee that you can read consistent data at a particular point in time. + +### Write operations are not supported + +ScalarDB Analytics with PostgreSQL only supports read-only queries. `INSERT`, `UPDATE`, and other write operations are not supported.
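For example, read-only analytical queries that join tables living in different back-end databases work in the same way as single-table queries. The following sketch assumes TPC-H-style column names in the sample data, such as `c_custkey` in `dynamons.customer` and `o_custkey` in `postgresns.orders` (an assumption; check the actual column definitions on the PostgreSQL side if you use your own schema):

```sql
-- Counts orders per market segment by joining a DynamoDB-backed table
-- with a PostgreSQL-backed table. Column names are assumed to follow TPC-H.
SELECT c.c_mktsegment, count(*) AS order_count
FROM dynamons.customer AS c
JOIN postgresns.orders AS o ON o.o_custkey = c.c_custkey
GROUP BY c.c_mktsegment;
```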
diff --git a/docs/3.12/scalardb-analytics-postgresql/images/imported-schema.png b/docs/3.12/scalardb-analytics-postgresql/images/imported-schema.png new file mode 100644 index 00000000..1cf8fea3 Binary files /dev/null and b/docs/3.12/scalardb-analytics-postgresql/images/imported-schema.png differ diff --git a/docs/3.12/scalardb-analytics-postgresql/images/multi-storage-overview.png b/docs/3.12/scalardb-analytics-postgresql/images/multi-storage-overview.png new file mode 100644 index 00000000..fc8df1cb Binary files /dev/null and b/docs/3.12/scalardb-analytics-postgresql/images/multi-storage-overview.png differ diff --git a/docs/3.12/scalardb-analytics-postgresql/installation.md b/docs/3.12/scalardb-analytics-postgresql/installation.md new file mode 100644 index 00000000..099e1e92 --- /dev/null +++ b/docs/3.12/scalardb-analytics-postgresql/installation.md @@ -0,0 +1,65 @@ +# How to Install ScalarDB Analytics with PostgreSQL in Your Local Environment by Using Docker + +This document explains how to set up a local environment that runs ScalarDB Analytics with PostgreSQL using the multi-storage back-end of Cassandra, PostgreSQL, and DynamoDB local server using [Docker Compose](https://docs.docker.com/compose/). + +## Prerequisites + +- [Docker Engine](https://docs.docker.com/engine/) and [Docker Compose](https://docs.docker.com/compose/). + +Follow the instructions on the Docker website according to your platform. + +## Step 1. Clone the `scalardb-samples` repository + +[scalardb-samples/scalardb-analytics-postgresql-sample](https://github.com/scalar-labs/scalardb-samples/tree/main/scalardb-analytics-postgresql-sample) repository is a project containing a sample configuration to set up ScalarDB Analytics with PostgreSQL. + +Determine the location on your local machine where you want to run the scalardb-analytics-postgresql-sample app. Then, open Terminal, go to the location by using the `cd` command, and run the following commands: + +```shell +$ git clone https://github.com/scalar-labs/scalardb-samples.git +$ cd scalardb-samples/scalardb-analytics-postgresql-sample +``` + +## Step 2. Log in to Docker + +`docker login` is required to start the ScalarDB Analytics with PostgreSQL Docker image. Because the [scalardb-analytics-postgresql](https://github.com/orgs/scalar-labs/packages/container/package/scalardb-analytics-postgresql) repository in the GitHub Container Registry is currently private, your GitHub account needs permission to access the container images. To get permission for your account, please ask the person in charge of managing GitHub accounts in your organization. In addition, you will also need to use a personal access token (PAT) as a password to log in to `ghcr.io`. For more details, see the official documentation from GitHub at [Authenticating to the Container registry](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry#authenticating-to-the-container-registry). + +```shell +# The read:packages scope in the personal access token settings must be selected to log in. +$ export CR_PAT= +$ echo $CR_PAT | docker login ghcr.io -u --password-stdin +``` + +## Step 3. Start up the ScalarDB Analytics with PostgreSQL services + +The following command starts up the PostgreSQL instance that serves ScalarDB Analytics with PostgreSQL along with the back-end servers of Cassandra, PostgreSQL, and DynamoDB local in the Docker containers. 
When you first run the command, the required Docker images will be downloaded from the GitHub Container Registry. + +```shell +$ docker-compose up +``` + +If you want to run the containers in the background, add the `-d` (`--detach`) option: + +```shell +$ docker-compose up -d +``` + +If you already have your own ScalarDB database and want to use it as a back-end service, you can launch only the PostgreSQL instance without starting additional back-end servers in containers. + +```shell +$ docker-compose up analytics +``` + +## Step 4. Run your analytical queries + +Now, you should have all the required services running. To run analytical queries, see [Getting Started with ScalarDB Analytics with PostgreSQL](./getting-started.md). + +## Step 5. Shut down the ScalarDB Analytics with PostgreSQL services + +To shut down the containers, do one of the following in Terminal, depending on how you started them: + +- If you started the containers in the foreground, press Ctrl+C in the terminal where `docker-compose` is running. +- If you started the containers in the background, run the following command. + +```shell +$ docker-compose down +``` diff --git a/docs/3.12/scalardb-analytics-postgresql/scalardb-fdw.md b/docs/3.12/scalardb-analytics-postgresql/scalardb-fdw.md new file mode 100644 index 00000000..78fe9787 --- /dev/null +++ b/docs/3.12/scalardb-analytics-postgresql/scalardb-fdw.md @@ -0,0 +1,174 @@ +# ScalarDB FDW + +ScalarDB FDW is a PostgreSQL extension that implements a foreign data wrapper (FDW) for [ScalarDB](https://www.scalar-labs.com/scalardb/). + +ScalarDB FDW uses the Java Native Interface to call ScalarDB directly as a library inside the FDW and reads data from external databases via ScalarDB scan operations. + +## Prerequisites + +You must have the following prerequisites set up in your environment. + +### JDK + +You must install a version of the Java Development Kit (JDK) that is compatible with ScalarDB. In addition, you must set the `JAVA_HOME` environment variable, which points to your JDK installation directory. + +Note that since these extensions use the Java Native Interface (JNI) internally, you must include the dynamic library of the Java virtual machine (JVM), such as `libjvm.so`, in the library search path. + +### PostgreSQL + +This extension supports PostgreSQL 13 or later. For details on how to install PostgreSQL, see the official documentation at [Server Administration](https://www.postgresql.org/docs/current/admin.html). + +## Build and installation + +You can build and install this extension by running the following command. + +```console +make install +``` + +### Common build errors + +This section describes some common build errors that you might encounter. + +#### ld: library not found for -ljvm + +Normally, the build script finds the path for `libjvm.so` and properly sets it as a library search path. However, if you encounter the error `ld: library not found for -ljvm`, please copy the `libjvm.so` file to the default library search path. For example: + +```console +ln -s //libjvm.so /usr/lib64/libjvm.so +``` + +## Usage + +This section provides a usage example and available options for FDW for ScalarDB. + +### Example + +The following example shows you how to install and create the necessary components, and then run a query by using the FDW extension. + +#### 1. Install the extension + +For details on how to install the extension, see the [Build and installation](#build-and-installation) section. + +#### 2.
Create an extension + +To create an extension, run the following command: + +```sql +CREATE EXTENSION scalardb_fdw; +``` + +#### 3. Create a foreign server + +To create a foreign server, run the following command: + +```sql +CREATE SERVER scalardb FOREIGN DATA WRAPPER scalardb_fdw OPTIONS ( + config_file_path '/path/to/scalardb.properties' +); +``` + +#### 4. Create user mapping + +To create user mapping, run the following command: + +```sql +CREATE USER MAPPING FOR PUBLIC SERVER scalardb; +``` + +#### 5. Create a foreign table + +To create a foreign table, run the following command: + +```sql +CREATE FOREIGN TABLE sample_table ( + pk int, + ck1 int, + ck2 int, + boolean_col boolean, + bigint_col bigint, + float_col double precision, + double_col double precision, + text_col text, + blob_col bytea +) SERVER scalardb OPTIONS ( + namespace 'ns', + table_name 'sample_table' +); +``` + +#### 6. Run a query + +To run a query, run the following command: + +```sql +select * from sample_table; +``` + +### Available options + +You can set the following options for ScalarDB FDW objects. + +#### `CREATE SERVER` + +You can set the following options on a ScalarDB foreign server object: + +| Name | Required | Type | Description | +| ------------------ | -------- | -------- | --------------------------------------------------------------- | +| `config_file_path` | **Yes** | `string` | The path to the ScalarDB config file. | +| `max_heap_size` | No | `string` | The maximum heap size of JVM. The format is the same as `-Xmx`. | + +#### `CREATE USER MAPPING` + +Currently, no options exist for `CREATE USER MAPPING`. + +#### `CREATE FOREIGN SERVER` + +The following options can be set on a ScalarDB foreign table object: + +| Name | Required | Type | Description | +| ------------ | -------- | -------- | ---------------------------------------------------------------- | +| `namespace` | **Yes** | `string` | The name of the namespace of the table in the ScalarDB instance. | +| `table_name` | **Yes** | `string` | The name of the table in the ScalarDB instance. | + +### Data-type mapping + +| ScalarDB | PostgreSQL | +| -------- | ---------------- | +| BOOLEAN | boolean | +| INT | int | +| BIGINT | bigint | +| FLOAT | float | +| DOUBLE | double precision | +| TEXT | text | +| BLOB | bytea | + +## Testing + +This section describes how to test FDW for ScalarDB. + +### Set up a ScalarDB instance for testing + +Before testing FDW for ScalarDB, you must have a running ScalarDB instance that contains test data. You can set up the instance and load the test data by running the following commands: + +```console +./test/setup.sh +``` + +If you want to reset the instances, you can run the following command, then the above setup command again. + +```console +./test/cleanup.sh +``` + +### Run regression tests + +You can run regression tests by running the following command **after** you have installed the FDW extension. + +```console +make installcheck +``` + +## Limitations + +- This extension aims to enable analytical query processing on ScalarDB-managed databases. Therefore, this extension only supports reading data from ScalarDB. diff --git a/docs/3.12/scalardb-analytics-postgresql/schema-importer.md b/docs/3.12/scalardb-analytics-postgresql/schema-importer.md new file mode 100644 index 00000000..d25ae5c3 --- /dev/null +++ b/docs/3.12/scalardb-analytics-postgresql/schema-importer.md @@ -0,0 +1,60 @@ +# Schema Importer + +Schema Importer is a CLI tool for automatically configuring PostgreSQL. 
By using this tool, your PostgreSQL database can have the same database objects, such as namespaces and tables, as your ScalarDB instance. + +Schema Importer reads the ScalarDB configuration file, retrieves the schemas of the tables defined in ScalarDB, and creates the corresponding foreign data wrapper external tables and views in that order. For more information, refer to [Getting Started with ScalarDB Analytics with PostgreSQL](getting-started.md). + +## Build Schema Importer + +You can build Schema Importer by using [Gradle](https://gradle.org/). To build Schema Importer, run the following command: + +```console +./gradlew build +``` + +You may want to build a fat JAR file so that you can launch Schema Importer by using `java -jar`. To build the fat JAR, run the following command: + +```console +./gradlew shadowJar +``` + +After you build the fat JAR, you can find the fat JAR file in the `app/build/libs/` directory. + +## Run Schema Importer + +To run Schema Importer by using the fat JAR file, run the following command: + +```console +java -jar +``` + +Available options are as follows: + +| Name | Required | Description | Default | +| --------------------------- | -------- | --------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------ | +| `--config` | **Yes** | Path to the ScalarDB configuration file | | +| `--config-on-postgres-host` | No | Path to the ScalarDB configuration file on the PostgreSQL-running host | The same value as `--config` will be used. | +| `--namespace`, `-n` | **Yes** | Namespaces to import into the analytics instance. You can specify the `--namespace` option multiple times if you have two or more namespaces. | | +| `--host` | No | PostgreSQL host | localhost | +| `--port` | No | PostgreSQL port | 5432 | +| `--database` | No | PostgreSQL database | postgres | +| `--user` | No | PostgreSQL user | postgres | +| `--password` | No | PostgreSQL password | | +| `--debug` | No | Enable debug mode | | + +## Test Schema Importer + +To test Schema Importer, run the following command: + +```console +./gradlew test +``` + +## Build a Docker image of Schema Importer + +To build a Docker image of Schema Importer, run the following command, replacing `` with the tag version of Schema Importer that you want to use: + +```console +docker build -t ghcr.io/scalar-labs/scalardb-analytics-postgresql-schema-importer: -f ./app/Dockerfile . +``` diff --git a/docs/3.12/scalardb-benchmarks/README.md b/docs/3.12/scalardb-benchmarks/README.md new file mode 100644 index 00000000..796091b8 --- /dev/null +++ b/docs/3.12/scalardb-benchmarks/README.md @@ -0,0 +1,235 @@ +# ScalarDB Benchmarking Tools + +This tutorial describes how to run benchmarking tools for ScalarDB. Database benchmarking is helpful for evaluating how databases perform against a set of standards. + +## Benchmark workloads + +- TPC-C +- YCSB (Workloads A, C, and F) +- Multi-storage YCSB (Workloads C and F) + - This YCSB variant is for a multi-storage environment that uses ScalarDB. + - Workers in a multi-storage YCSB execute the same number of read and write operations in two namespaces: `ycsb_primary` and `ycsb_secondary`.
+ +## Prerequisites + +- One of the following Java Development Kits (JDKs): + - [Oracle JDK](https://www.oracle.com/java/technologies/downloads/) LTS version 8 + - [OpenJDK](https://openjdk.org/install/) LTS version 8 +- Gradle +- [Kelpie](https://github.com/scalar-labs/kelpie) + - Kelpie is a framework for performing end-to-end testing, such as system benchmarking and verification. Get the latest version from [Kelpie Releases](https://github.com/scalar-labs/kelpie), and unzip the archive file. +- A client to run the benchmarking tools +- A target database + - For a list of databases that ScalarDB supports, see [Supported Databases](https://github.com/scalar-labs/scalardb/blob/master/docs/scalardb-supported-databases.md). + +{% capture notice--info %} +**Note** + +Currently, only JDK 8 can be used when running the benchmarking tools. +{% endcapture %} + +
{{ notice--info | markdownify }}
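Before building, you can double-check which JDK the tools will pick up by printing the active Java version (a generic check, not specific to these tools; the exact output format varies by JDK vendor):

```console
$ java -version
```

If the reported version is not 8, point `JAVA_HOME` and your `PATH` at a JDK 8 installation before proceeding.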
+ +## Set up the benchmarking tools + +The following sections describe how to set up the benchmarking tools. + +### Clone the ScalarDB benchmarks repository + +Open **Terminal**, then clone the ScalarDB benchmarks repository by running the following command: + +```console +$ git clone https://github.com/scalar-labs/scalardb-benchmarks +``` + +Then, go to the directory that contains the benchmarking files by running the following command: + +```console +$ cd scalardb-benchmarks +``` + +### Build the tools + +To build the benchmarking tools, run the following command: + +```console +$ ./gradlew shadowJar +``` + +### Load the schema + +Before loading the initial data, the tables must be defined by using the [ScalarDB Schema Loader](https://github.com/scalar-labs/scalardb/blob/master/docs/schema-loader.md). To apply the schema, go to the [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases) page and download the ScalarDB Schema Loader that matches the version of ScalarDB that you are using to the `scalardb-benchmarks` root folder. + +In addition, you need a properties file that contains database configurations for ScalarDB. For details about configuring the ScalarDB properties file, see [ScalarDB Configurations](https://github.com/scalar-labs/scalardb/blob/master/docs/configurations.md). + +After applying the schema and configuring the properties file, select a benchmark and follow the instructions to create the tables. + +
+
+ + + +
+ +
+ +To create tables for TPC-C benchmarking ([`tpcc-schema.json`](https://github.com/scalar-labs/scalardb-benchmarks/blob/master/tpcc-schema.json)), run the following command, replacing the contents in the angle brackets as described: + +```console +$ java -jar scalardb-schema-loader-.jar --config -f tpcc-schema.json --coordinator +``` +
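For example, if you were using ScalarDB Schema Loader 3.12.0 with a properties file named `database.properties` (both values are hypothetical placeholders for illustration), the command would look like this:

```console
$ java -jar scalardb-schema-loader-3.12.0.jar --config database.properties -f tpcc-schema.json --coordinator
```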
+
+ +To create tables for YCSB benchmarking ([`ycsb-schema.json`](https://github.com/scalar-labs/scalardb-benchmarks/blob/master/ycsb-schema.json)), run the following command, replacing the contents in the angle brackets as described: + +```console +$ java -jar scalardb-schema-loader-.jar --config -f ycsb-schema.json --coordinator +``` +
+
+ +To create tables for multi-storage YCSB benchmarking ([`ycsb-multi-storage-schema.json`](https://github.com/scalar-labs/scalardb-benchmarks/blob/master/ycsb-multi-storage-schema.json)), run the following command, replacing the contents in the angle brackets as described: + +```console +$ java -jar scalardb-schema-loader-.jar --config -f ycsb-multi-storage-schema.json --coordinator +``` +
+
+ +### Prepare a benchmarking configuration file + +To run a benchmark, you must first prepare a benchmarking configuration file. The configuration file requires at least the locations of the workload modules to run and the database configuration. + +The following is an example configuration for running the TPC-C benchmark. The ScalarDB properties file specified for `config_file` should be the properties file for the [benchmarking environment that you previously set up](#set-up-the-benchmarking-tools). + +{% capture notice--info %} +**Note** + +Alternatively, instead of using the ScalarDB properties file, you can specify each database configuration item in the `.toml` file. If `config_file` is specified, all other configurations under `[database_config]` will be ignored even if they are uncommented. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +```toml +[modules] +[modules.preprocessor] +name = "com.scalar.db.benchmarks.tpcc.TpccLoader" +path = "./build/libs/scalardb-benchmarks-all.jar" +[modules.processor] +name = "com.scalar.db.benchmarks.tpcc.TpccBench" +path = "./build/libs/scalardb-benchmarks-all.jar" +[modules.postprocessor] +name = "com.scalar.db.benchmarks.tpcc.TpccReporter" +path = "./build/libs/scalardb-benchmarks-all.jar" + +[database_config] +config_file = "" +#contact_points = "localhost" +#contact_port = 9042 +#username = "cassandra" +#password = "cassandra" +#storage = "cassandra" +``` + +You can define parameters to pass to modules in the configuration file. For details, see the sample configuration files below and available parameters in [Common parameters](#common-parameters): + +- **TPC-C:** [`tpcc-benchmark-config.toml`](https://github.com/scalar-labs/scalardb-benchmarks/blob/master/tpcc-benchmark-config.toml) +- **YCSB:** [`ycsb-benchmark-config.toml`](https://github.com/scalar-labs/scalardb-benchmarks/blob/master/ycsb-benchmark-config.toml) +- **Multi-storage YCSB:** [`ycsb-multi-storage-benchmark-config.toml`](https://github.com/scalar-labs/scalardb-benchmarks/blob/master/ycsb-multi-storage-benchmark-config.toml) + +## Run a benchmark + +Select a benchmark, and follow the instructions to run the benchmark. + +
+
+ + + +
+ +
+ +To run the TPC-C benchmark, run the following command, replacing `` with the path to the Kelpie directory: + +```console +$ //bin/kelpie --config tpcc-benchmark-config.toml +``` +
+
+ +To run the YCSB benchmark, run the following command, replacing `` with the path to the Kelpie directory: + +```console +$ //bin/kelpie --config ycsb-benchmark-config.toml +``` +
+
+ +To run the multi-storage YCSB benchmark, run the following command, replacing `` with the path to the Kelpie directory: + +```console +$ //bin/kelpie --config ycsb-multi-storage-benchmark-config.toml +``` +
+
+ +In addition, the following options are available; a combined example that uses them appears at the end of this page: + +- `--only-pre`. Only loads the data. +- `--only-process`. Only runs the benchmark. +- `--except-pre`. Runs a job without loading the data. +- `--except-process`. Runs a job without running the benchmark. + +## Common parameters + +| Name | Description | Default | +|:---------------|:--------------------------------------------------------|:----------| +| `concurrency` | Number of threads for benchmarking. | `1` | +| `run_for_sec` | Duration of benchmark (in seconds). | `60` | +| `ramp_for_sec` | Duration of ramp-up time before benchmark (in seconds). | `0` | + +## Workload-specific parameters + +Select a benchmark to see its available workload parameters. + +
+
+ + +
+ +
+ +| Name | Description | Default | +|:-----------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:----------| +| `num_warehouses` | Number of warehouses (scale factor) for benchmarking. | `1` | +| `load_concurrency` | Number of threads for loading. | `1` | +| `load_start_warehouse` | Start ID of loading warehouse. This option can be useful with `--skip-item-load` when loading large-scale data with multiple clients or adding additional warehouses. | `1` | +| `load_end_warehouse` | End ID of loading warehouse. You can use either `--num-warehouses` or `--end-warehouse` to specify the number of loading warehouses. | `1` | +| `skip_item_load` | Whether or not to skip loading item table. | `false` | +| `use_table_index` | Whether or not to use a generic table-based secondary index instead of ScalarDB's secondary index. | `false` | +| `np_only` | Run benchmark with only new-order and payment transactions (50% each). | `false` | +| `rate_new_order` | Percentage of new-order transactions. When specifying this percentage based on your needs, you must specify the percentages for all other rate parameters. In that case, the total of all rate parameters must equal 100 percent. | N/A | +| `rate_payment` | Percentage of payment transactions. When specifying this percentage based on your needs, you must specify the percentages for all other rate parameters. In that case, the total of all rate parameters must equal 100 percent. | N/A | +| `rate_order_status` | Percentage of order-status transactions. When specifying this percentage based on your needs, you must specify the percentages for all other rate parameters. In that case, the total of all rate parameters must equal 100 percent. | N/A | +| `rate_delivery` | Percentage of delivery transactions. When specifying this percentage based on your needs, you must specify the percentages for all other rate parameters. In that case, the total of all rate parameters must equal 100 percent. | N/A | +| `rate_stock_level` | Percentage of stock-level transactions. When specifying this percentage based on your needs, you must specify the percentages for all other rate parameters. In that case, the total of all rate parameters must equal 100 percent. | N/A | +| `backoff` | Sleep time in milliseconds inserted after a transaction is aborted due to a conflict. | `0` | + +
+
+ +| Name | Description | Default | +|:------------------------|:----------------------------------------------------------------------------------|:----------------------------------------------| +| `load_concurrency` | Number of threads for loading. | `1` | +| `load_batch_size` | Number of put records in a single loading transaction. | `1` | +| `load_overwrite` | Whether or not to overwrite when loading records. | `false` | +| `ops_per_tx` | Number of operations in a single transaction. | `2` (Workloads A and C)
`1` (Workload F) | +| `record_count` | Number of records in the target table. | `1000` | +| `use_read_modify_write` | Whether or not to use read-modify-writes instead of blind writes in Workload A. | `false`[^rmw] | + +[^rmw]: The default value is `false` for `use_read_modify_write` since Workload A doesn't assume that the transaction reads the original record first. However, if you're using Consensus Commit as the transaction manager, you must set `use_read_modify_write` to `true`. This is because ScalarDB doesn't allow a blind write for an existing record. +
+
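As a combined example of the run options described above (the Kelpie path and the configuration file name are placeholders; any of the benchmark configurations shown earlier can be used), you can load the initial data once and then run the measurement phase separately:

```console
# Load the initial data only.
$ /path/to/kelpie/bin/kelpie --config tpcc-benchmark-config.toml --only-pre

# Run the benchmark against the previously loaded data.
$ /path/to/kelpie/bin/kelpie --config tpcc-benchmark-config.toml --only-process
```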
diff --git a/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-admin-api.md b/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-admin-api.md new file mode 100644 index 00000000..987f356f --- /dev/null +++ b/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-admin-api.md @@ -0,0 +1,104 @@ +# Getting Started with the Administrative API in the ScalarDB Cluster .NET Client SDK + +The ScalarDB Cluster .NET Client SDK supports the Administrative API of ScalarDB Cluster. By using this API, you can manage ScalarDB Cluster from .NET applications. + +{% capture notice--info %} +**Note** + +Although we recommend using asynchronous methods as in the following examples, you can use synchronous methods instead. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +## Get a transaction manager + +First, you need to get an object for interacting with the Administrative API. To get the object, you can use `TransactionFactory` as follows, replacing `` with the FQDN or the IP address, and `` with the port number (`60053` by default) of your cluster: + +```c# +var scalarDbOptions = new ScalarDbOptions + { + Address = "http://:", + HopLimit = 10 + }; +var factory = TransactionFactory.Create(scalarDbOptions); + +using var admin = factory.GetTransactionAdmin(); +``` + +## Manage ScalarDB Cluster + +The following operations can be performed by using the ScalarDB Cluster .NET Client SDK. + +### Create a new namespace + +```c# +await admin.CreateNamespaceAsync("ns", ifNotExists: true); +``` + +### Drop a namespace + +```c# +await admin.DropNamespaceAsync("ns", ifExists: true); +``` + +### Check if a namespace exists + +```c# +var namespaceExists = await admin.IsNamespacePresentAsync("ns"); +``` + +### Create a new table + +```c# +using Scalar.Db.Cluster.Rpc.V1; +// ... +using ScalarDB.Net.Client.Builders; + +// ... + +var tableMetadata = + new TableMetadataBuilder() + .AddPartitionKey("pk", DataType.Int) + .AddClusteringKey("ck", DataType.Double) + .AddSecondaryIndex("index", DataType.Float) + .AddColumn("ordinary", DataType.Text) + .Build(); + +await admin.CreateTableAsync("ns", "table_name", tableMetadata, ifNotExists: true); +``` + +### Drop a table + +```c# +await admin.DropTableAsync("ns", "table_name", ifExists: true); +``` + +### Check if a table exists + +```c# +var tableExists = await admin.IsTablePresentAsync("ns", "table_name"); +``` + +### Get the names of existing tables + +```c# +var tablesList = await admin.GetTableNamesAsync("ns"); +``` + +### Create the Coordinator table + +```c# +await admin.CreateCoordinatorTablesAsync(); +``` + +### Drop the Coordinator table + +```c# +await admin.DropCoordinatorTablesAsync(); +``` + +### Check if the Coordinator table exists + +```c# +var exists = await admin.AreCoordinatorTablesPresentAsync(); +``` diff --git a/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-aspnet-and-di.md b/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-aspnet-and-di.md new file mode 100644 index 00000000..96591860 --- /dev/null +++ b/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-aspnet-and-di.md @@ -0,0 +1,49 @@ +# Getting Started with ASP.NET Core and Dependency Injection in the ScalarDB Cluster .NET Client SDK + +The ScalarDB Cluster .NET Client SDK supports Dependency Injection in frameworks like ASP.NET Core. + +You can register the ScalarDB Cluster transaction managers as services in `IServiceCollection` as follows, replacing `` with the FQDN or the IP address, and `` with the port number (`60053` by default) of your cluster: + +```c# +using ScalarDB.Net.Client.Extensions; + +//... + +var builder = WebApplication.CreateBuilder(args); + +//...
+ +builder.Services.AddScalarDbCluster(options => +{ + options.Address = "http://:"; + options.HopLimit = 10; + options.EnableCrud = true; // true by default + options.EnableSql = true; // false by default + options.EnableAdmin = true; // false by default +}); +``` + +After registering the transaction managers, they can be injected into the controller's constructor as follows: + +```c# +[ApiController] +public class OrderController: ControllerBase +{ + private readonly IDistributedTransactionManager _manager; + + public OrderController(IDistributedTransactionManager manager) + { + _manager = manager; + } +} +``` + +Although these examples are for WebApi projects, the examples will work in a similar way in GrpcService projects. + +### `AddScalarDbCluster`-specific options + +This section describes some specific options for `AddScalarDbCluster`: + +- **EnableCrud:** Enables the transaction managers for executing CRUD operations. `IDistributedTransactionManager` and `ITwoPhaseCommitTransactionManager` will become available for injection. +- **EnableSql:** Enables the transaction managers for executing SQL statements. `ISqlTransactionManager` and `ISqlTwoPhaseCommitTransactionManager` will become available for injection. +- **EnableAdmin:** Enables the administrative interface. `IDistributedTransactionAdmin` will become available for injection. diff --git a/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-auth.md b/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-auth.md new file mode 100644 index 00000000..aa4334ec --- /dev/null +++ b/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-auth.md @@ -0,0 +1,27 @@ +# Getting Started with ScalarDB Auth by Using ScalarDB Cluster .NET Client SDK + +The ScalarDB Cluster .NET Client SDK supports [ScalarDB Auth](https://github.com/scalar-labs/scalardb-cluster/blob/main/docs/scalardb-auth-with-sql.md), which allows you to authenticate and authorize your requests to ScalarDB Cluster. + +## Set credentials in `ScalarDbOptions` + +First, you need to get a transaction manager or transaction admin object with credentials by using `TransactionFactory` as follows, replacing the contents in the angle brackets as described. Also, be sure to replace `` with `GetTransactionManager()`, `GetTwoPhaseCommitTransactionManager()`, `GetSqlTransactionManager()`, or `GetSqlTwoPhaseCommitTransactionManager()`. + +```c# +var scalarDbOptions = new ScalarDbOptions + { + Address = "http://:", + HopLimit = 10, + AuthEnabled = true, + Username = "", + Password = "" + }; +var factory = TransactionFactory.Create(scalarDbOptions); + +// To get a transaction manager +using var manager = factory.(); + +// To get a transaction admin +using var admin = factory.GetTransactionAdmin(); +``` + +A transaction manager or transaction admin object created from `TransactionFactory` with the provided credentials will automatically log in to ScalarDB Cluster and can communicate with it. 
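Once authenticated, the returned objects are used exactly as described in the other guides for this SDK. For example, the following minimal sketch reuses the Administrative API call from the Administrative API guide (the namespace name is an arbitrary example):

```c#
// The admin object was created above with AuthEnabled = true,
// so this call runs as the authenticated user.
await admin.CreateNamespaceAsync("ns", ifNotExists: true);
```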
diff --git a/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-distributed-sql-transactions.md b/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-distributed-sql-transactions.md new file mode 100644 index 00000000..b61c8468 --- /dev/null +++ b/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-distributed-sql-transactions.md @@ -0,0 +1,112 @@ +# Getting Started with Distributed SQL Transactions in the ScalarDB Cluster .NET Client SDK + +The ScalarDB Cluster .NET Client SDK supports the distributed SQL transaction functionality of ScalarDB Cluster. The SDK includes transaction and manager abstractions for easier communication within a cluster. + +{% capture notice--info %} +**Note** + +Although we recommend using asynchronous methods, as in the following examples, you can use synchronous methods instead. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +For details about distributed non-SQL transactions, see [Getting Started with Distributed Transactions in the ScalarDB Cluster .NET Client SDK](getting-started-with-distributed-transactions.md). + +## Get a transaction manager + +First, you need to get a transaction manager object for distributed SQL transactions. To get the transaction manager object, you can use `TransactionFactory` as follows, replacing `` with the FQDN or the IP address, and `` with the port number (`60053` by default) of your cluster: + +```c# +var scalarDbOptions = new ScalarDbOptions + { + Address = "http://:", + HopLimit = 10 + }; +var factory = TransactionFactory.Create(scalarDbOptions); + +using var manager = factory.GetSqlTransactionManager(); +``` + +## Execute SQL queries + +To execute a SQL statement, you need a `SqlStatement` object, which can be created by using a builder as follows: + +```c# +using ScalarDB.Net.Client.Builders; + +// ... + +var sqlStatement = + new SqlStatementBuilder() + .SetSql("SELECT * FROM order_service.statements WHERE item_id = :item_id") + .AddParam("item_id", 2) + .Build(); +``` + +A single SQL statement can be executed directly by using the transaction manager as follows: + +```c# +var resultSet = await manager.ExecuteAsync(sqlStatement); +``` + +The result from the `ExecuteAsync` method will contain records received from the cluster. The SDK has `GetValue`, `TryGetValue`, and `IsNull` extension methods to simplify using the records: + +```c# +using ScalarDB.Net.Client.Extensions; + +// ... + +foreach (var record in resultSet.Records) +{ + // Getting an integer value from the "item_id" column. + // If it fails, an exception will be thrown. + var itemId = record.GetValue("item_id"); + + // Trying to get a string value from the "order_id" column. + // If it fails, no exception will be thrown. + if (record.TryGetValue("order_id", out var orderId)) + Console.WriteLine($"order_id: {orderId}"); + + // Checking if the "count" column is null. + if (record.IsNull("count")) + Console.WriteLine("'count' is null"); +} +``` + +## Execute SQL queries in a transaction + +To execute multiple SQL statements as part of a single transaction, you need a transaction object. + +You can create a transaction object by using the transaction manager as follows: + +```c# +var transaction = await manager.BeginAsync(); +``` + +You can also resume a transaction that has already been started as follows: + +```c# +var transaction = manager.Resume(transactionIdString); +``` + +{% capture notice--info %} +**Note** + +The `Resume` method doesn't have an asynchronous version because it only creates a transaction object. Because of this, resuming a transaction by using the wrong ID is possible. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +The transaction has the same `ExecuteAsync` method as the transaction manager. That method can be used to execute SQL statements. + +When a transaction is ready to be committed, you can call the `CommitAsync` method of the transaction as follows: + +```c# +await transaction.CommitAsync(); +``` + +To roll back the transaction, you can use the `RollbackAsync` method: + +```c# +await transaction.RollbackAsync(); +``` diff --git a/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-distributed-transactions.md b/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-distributed-transactions.md new file mode 100644 index 00000000..f15f2c4c --- /dev/null +++ b/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-distributed-transactions.md @@ -0,0 +1,181 @@ +# Getting Started with Distributed Transactions in the ScalarDB Cluster .NET Client SDK + +The ScalarDB Cluster .NET Client SDK supports the distributed transaction functionality of ScalarDB Cluster. The SDK includes transaction and manager abstractions for easier communication within a cluster. + +{% capture notice--info %} +**Note** + +Although we recommend using asynchronous methods as in the following examples, you can use synchronous versions instead. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +For details about distributed SQL transactions, see [Getting Started with Distributed SQL Transactions in the ScalarDB Cluster .NET Client SDK](getting-started-with-distributed-sql-transactions.md). + +## Get a transaction manager + +First, you need to get a transaction manager for distributed transactions. To get the transaction manager, you can use `TransactionFactory` as follows, replacing `` with the FQDN or the IP address, and `` with the port number (`60053` by default) of your cluster: + +```c# +var scalarDbOptions = new ScalarDbOptions + { + Address = "http://:", + HopLimit = 10 + }; +var factory = TransactionFactory.Create(scalarDbOptions); + +using var manager = factory.GetTransactionManager(); +``` + +## Manage transactions + +To execute CRUD operations, a transaction is needed. You can begin a transaction by using the transaction manager as follows: + +```c# +var transaction = await manager.BeginAsync(); +``` + +You can also resume a transaction that is already being executed as follows: + +```c# +var transaction = manager.Resume(transactionIdString); +``` + +{% capture notice--info %} +**Note** + +The `Resume` method doesn't have an asynchronous version because it only creates a transaction object. Because of this, resuming a transaction by using the wrong ID is possible. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +When a transaction is ready to be committed, you can call the `CommitAsync` method of the transaction as follows: + +```c# +await transaction.CommitAsync(); +``` + +To roll back the transaction, you can use the `RollbackAsync` method: + +```c# +await transaction.RollbackAsync(); +``` + +## Execute CRUD operations + +A transaction has `GetAsync`, `ScanAsync`, `PutAsync`, `DeleteAsync`, and `MutateAsync` methods to execute CRUD commands against the cluster. As a parameter, these methods have a command object. A command object can be created by using the builders listed in this section. + +To use these builders add the following namespace to the `using` section: + +```c# +using ScalarDB.Net.Client.Builders; +``` + +{% capture notice--info %} +**Note** + +The cluster does not support parallel execution of commands inside one transaction, so make sure to use `await` for asynchronous methods. +{% endcapture %} + +
{{ notice--info | markdownify }}
+
+### `GetAsync` method example
+
+```c#
+using GetTypeEnum = Scalar.Db.Cluster.Rpc.V1.Get.Types.GetType;
+
+// ...
+
+var get =
+    new GetBuilder()
+        .SetNamespaceName("ns")
+        .SetTableName("statements")
+        .SetGetType(GetTypeEnum.Get)
+        .AddPartitionKey("order_id", "1")
+        .AddClusteringKey("item_id", 2)
+        .SetProjections("item_id", "count")
+        .Build();
+
+var getResult = await transaction.GetAsync(get);
+```
+
+### `ScanAsync` method example
+
+```c#
+using static Scalar.Db.Cluster.Rpc.V1.Scan.Types;
+
+// ...
+
+var scan =
+    new ScanBuilder()
+        .SetNamespaceName("ns")
+        .SetTableName("statements")
+        .SetScanType(ScanType.Scan)
+        .AddPartitionKey("order_id", "1")
+        .AddStartClusteringKey("item_id", 2)
+        .SetStartInclusive(true)
+        .AddEndClusteringKey("item_id", 8)
+        .SetEndInclusive(true)
+        .SetProjections("item_id", "count")
+        .Build();
+
+var scanResult = await transaction.ScanAsync(scan);
+```
+
+### `PutAsync` method example
+
+```c#
+var put =
+    new PutBuilder()
+        .SetNamespaceName("ns")
+        .SetTableName("statements")
+        .AddPartitionKey("order_id", "1")
+        .AddClusteringKey("item_id", 2)
+        .AddColumn("count", 11)
+        .Build();
+
+await transaction.PutAsync(put);
+```
+
+### `DeleteAsync` method example
+
+```c#
+var delete =
+    new DeleteBuilder()
+        .SetNamespaceName("ns")
+        .SetTableName("statements")
+        .AddPartitionKey("order_id", "1")
+        .AddClusteringKey("item_id", 2)
+        .Build();
+
+await transaction.DeleteAsync(delete);
+```
+
+### `MutateAsync` method example
+
+```c#
+using Scalar.Db.Cluster.Rpc.V1;
+
+// ...
+
+var put =
+    new PutBuilder()
+        .SetNamespaceName("ns")
+        .SetTableName("statements")
+        .AddPartitionKey("order_id", "1")
+        .AddClusteringKey("item_id", 2)
+        .AddColumn("count", 11)
+        .Build();
+
+var mutate = new Mutation { Put = put };
+
+await transaction.MutateAsync(new[] { mutate });
+```
+
+{% capture notice--info %}
+**Note**
+
+To modify data by using the `PutAsync`, `DeleteAsync`, or `MutateAsync` method, the data must be retrieved first by using the `GetAsync` or `ScanAsync` method.
+{% endcapture %}
+
+
{{ notice--info | markdownify }}
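+
+As a wrap-up, the following sketch combines the operations above into a single read-modify-write flow. It assumes the same `ns.statements` table and the `using` directives from the earlier examples, and it simplifies error handling to a single rollback-on-exception block:
+
+```c#
+var transaction = await manager.BeginAsync();
+
+try
+{
+    // Retrieve the record first, as required before modifying it.
+    var get =
+        new GetBuilder()
+            .SetNamespaceName("ns")
+            .SetTableName("statements")
+            .SetGetType(GetTypeEnum.Get)
+            .AddPartitionKey("order_id", "1")
+            .AddClusteringKey("item_id", 2)
+            .Build();
+    var getResult = await transaction.GetAsync(get);
+
+    // Write the new value for the same record.
+    var put =
+        new PutBuilder()
+            .SetNamespaceName("ns")
+            .SetTableName("statements")
+            .AddPartitionKey("order_id", "1")
+            .AddClusteringKey("item_id", 2)
+            .AddColumn("count", 12)
+            .Build();
+    await transaction.PutAsync(put);
+
+    await transaction.CommitAsync();
+}
+catch
+{
+    // Roll back if any operation failed before the commit succeeded.
+    await transaction.RollbackAsync();
+    throw;
+}
+```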
diff --git a/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-linq.md b/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-linq.md new file mode 100644 index 00000000..e262d1a2 --- /dev/null +++ b/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-linq.md @@ -0,0 +1,354 @@ +# Getting Started with LINQ in the ScalarDB Cluster .NET Client SDK + +The ScalarDB Cluster .NET Client SDK supports querying the cluster with LINQ and some Entity Framework-like functionality. + +{% capture notice--info %} +**Note** + +This SDK doesn't support [Entity Framework](https://learn.microsoft.com/en-us/ef/). Instead, this SDK implements functionality that is similar to Entity Framework. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +{% capture notice--info %} +**Note** + +SQL support must be enabled on the cluster to use LINQ. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +## Set up classes + +After confirming that SQL support is enabled, create a C# class for each ScalarDB table that you want to use. For example: + +```c# +using System.ComponentModel.DataAnnotations.Schema; +using ScalarDB.Net.Client.DataAnnotations; + +// ... + +[Table("ns.statements")] +public class Statement +{ + [Column("statement_id", Order = 0), PartitionKey] + public int Id { get; set; } + + [Column("order_id", Order = 1), SecondaryIndex] + public string OrderId { get; set; } = String.Empty; + + [Column("item_id", Order = 2), SecondaryIndex] + public int ItemId { get; set; } + + [Column("count", Order = 3)] + public int Count { get; set; } +} + +[Table("order_service.items")] +public class Item +{ + [Column("item_id", Order = 0), PartitionKey] + public int Id { get; set; } + + [Column("name", Order = 1)] + public string Name { get; set; } = String.Empty; + + [Column("price", Order = 2)] + public int Price { get; set; } +} +``` + +If a partition key, clustering key, or secondary index consists of more than one column, the `Order` property of `ColumnAttribute` will decide the order inside the key or index. + +Create a context class that has properties for all the tables you want to use. For example: + +```c# + public class MyDbContext: ScalarDbContext + { + public ScalarDbSet Statements { get; set; } + public ScalarDbSet Items { get; set; } + } +``` + +After all the classes are created, you need to add the created context to the Dependency Injection. For example: + +```c# +using ScalarDB.Net.Client.Extensions; + +//... + +var builder = WebApplication.CreateBuilder(args); + +//... + +builder.Services.AddScalarDbContext(options => +{ + options.Address = "http://:"; + options.HopLimit = 10; +}); +``` + +The context can be injected into the controller's constructor as follows: + +```c# +[ApiController] +public class OrderController: ControllerBase +{ + private readonly MyDbContext _myDbContext; + + public OrderController(MyDbContext myDbContext) + { + _myDbContext = myDbContext; + } +} +``` + +## Use LINQ to query properties + +After receiving `MyDbContext` in your controller, you can query its properties by using LINQ. For example: + +### Use query syntax + +```c# +from stat in _myDbContext.Statements +join item in _myDbContext.Items on stat.ItemId equals item.Id +where stat.Count > 2 && item.Name.Contains("apple") +orderby stat.Count descending, stat.ItemId +select new { item.Name, stat.Count }; +``` + +### Use method syntax + +```c# +_myDbContext.Statements + .Where(stat => stat.OrderId == "1") + .Skip(1) + .Take(2); +``` + +### Use the `First` method to get one `Statement` by its partition key + +```c# +_myDbContext.Statements.First(stat => stat.OrderId == "1"); +``` + +### Use the `DefaultIfEmpty` method to perform left outer join + +```c# +from stat in _myDbContext.Statements +join item in _myDbContext.Items on stat.ItemId equals item.Id into items +from i in items.DefaultIfEmpty() +select new { ItemName = i != null ? i.Name : "" } +``` + +The following methods are supported: + +- `Select` +- `Where` +- `Join` +- `GroupJoin` +- `First`/`FirstOrDefault` +- `Skip` +- `Take` +- `OrderBy`/`OrderByDescending` +- `ThenBy`/`ThenByDescending` + +The following `String` methods are supported inside the predicates of `Where` and `First`/`FirstOrDefault` methods: + +- `Contains` +- `StartsWith` +- `EndsWith` + +Unsupported LINQ methods can be used after the supported methods. 
For example: + +```c# +_myDbContext.Statements + .Where(stat => stat.OrderId == "1") // Will be executed remotely on the cluster. + .Distinct() // Will be executed locally in the app. + .Where(stat => stat.ItemId < 5); // Will be executed locally. +``` + +{% capture notice--info %} +**Note** + +If `Skip` is specified before `Take` or `First`/`FirstOrDefault`, the number that is passed to `Skip` will be added to the `LIMIT` number in the SQL query. By itself, `Skip` won't change the resulting SQL query. +{% endcapture %} + +
{{ notice--info | markdownify }}
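+
+For example, under the behavior described in the note above, the following query (a sketch that uses the `Statements` property defined earlier) should produce a SQL query whose `LIMIT` is 15, that is, the 10 passed to `Take` plus the 5 passed to `Skip`:
+
+```c#
+var page = _myDbContext.Statements
+    .Where(stat => stat.OrderId == "1")
+    .Skip(5)   // Added to the LIMIT number, as described in the note above.
+    .Take(10)  // Sets the base LIMIT number.
+    .ToArray();
+```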
+ +## Limitations when using LINQ against `ScalarDbSet{T}` objects + +- All method calls are supported inside `Select`. For example: + +```c# +.Select(stat => convertToSomething(stat.ItemId)) +//... +.Select(stat => stat.ItemId * getSomeNumber()) +``` + +- Method calls, except for calls against the querying object, are also supported inside `Where` and `First`/`FirstOrDefault`. For example: + +```c# +.Where(stat => stat.ItemId == getItemId()) // is OK +//... +.Where(stat => stat.ItemId.ToString() == "1") // is not supported +``` + +- All method calls are supported inside the result-selecting lambda of `Join` and `GroupJoin`. For example: + +```c# +.Join(_myDbContext.Items, + stat => stat.ItemId, + item => item.Id, + (stat, item) => new { ItemName = convertToSomething(item.Name), + ItemCount = stat.Count.ToString() }) +``` + +- Method calls are not supported inside the key-selecting lambdas of `Join` and `GroupJoin`. +- Custom equality comparers are not supported. The `comparer` argument in `Join` and `GroupJoin` methods will be ignored if the argument has been passed. +- More than one `from` directly in one query is not supported, except when the `DefaultIfEmpty` method is used to perform left outer join. Each subsequent `from` is considered to be a separate query. + +```c# +var firstQuery = from stat in _myDbContext.Statements + where stat.Count > 2 + select new { stat.Count }; + +var secondQuery = from item in _myDbContext.Items + where item.Price > 6 + select new { item.Name }; + +var finalQuery = from first in firstQuery + from second in secondQuery + select new { first.Count, second.Name }; + +// 1. firstQuery will be executed against the cluster. +// 2. secondQuery will be executed against the cluster for each object (row) from 1. +// 3. finalQuery will be executed locally with the results from 1 and 2. +var result = finalQuery.ToArray(); +``` + +- Method calls are not supported inside `OrderBy`/`OrderByDescending` or `ThenBy`/`ThenByDescending`. +- Only overloads of `Contains`, `StartsWith`, and `EndsWith` methods that have a single string argument are supported inside `Where` and `First`/`FirstOrDefault`. + +## Modify data in a cluster by using `ScalarDbContext` + +The properties of the class inherited from `ScalarDbContext` can be used to modify data. + +### Add a new object by using the `AddAsync` method + +```c# +var statement = new Statement + { + OrderId = "2", + ItemId = 4, + Count = 8 + }; +await _myDbContext.Statements.AddAsync(statement); +``` + +### Update an object by using the `UpdateAsync` method + +```c# +var statement = _myDbContext.Statements.First(stat => stat.Id == 1); + +// ... + +statement.Count = 10; +await _myDbContext.Statements.UpdateAsync(statement); +``` + +### Remove an object by using the `RemoveAsync` method + +```c# +var statement = _myDbContext.Statements.First(stat => stat.Id == 1); + +// ... + +await _myDbContext.Statements.RemoveAsync(statement); +``` + +## Manage transactions + +LINQ queries and `AddAsync`, `UpdateAsync`, and `RemoveAsync` methods can be executed without an explicitly started transaction. However, to execute multiple queries and methods as part of a single transaction, the transaction must be explicitly started and committed. `ScalarDbContext` supports both ordinary transactions and transactions with the two-phase commit interface in ScalarDB. 
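+
+For example, a minimal sketch that wraps the methods described in the following sections into one explicit transaction (assuming the `MyDbContext` and `Statement` classes defined earlier; error handling is simplified) might look like this:
+
+```c#
+await _myDbContext.BeginTransactionAsync();
+
+try
+{
+    // Read and modify an object inside the same explicit transaction.
+    var statement = _myDbContext.Statements.First(stat => stat.Id == 1);
+    statement.Count += 1;
+    await _myDbContext.Statements.UpdateAsync(statement);
+
+    await _myDbContext.CommitTransactionAsync();
+}
+catch
+{
+    await _myDbContext.RollbackTransactionAsync();
+    throw;
+}
+```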
+ +### Begin a new transaction + +```c# +await _myDbContext.BeginTransactionAsync(); +``` + +### Begin a new transaction with the two-phase commit interface + +```c# +using ScalarDB.Net.Client.Core; + +// ... + +await _myDbContext.BeginTransactionAsync(TransactionType.TwoPhaseCommit); +``` + +### Get the ID of a currently active transaction + +```c# +var transactionId = _myDbContext.CurrentTransactionId; +``` + +### Get the type of a currently active transaction + +```c# +var transactionType = _myDbContext.CurrentTransactionType; +``` + +### Join an existing transaction with the two-phase commit interface + +```c# +using ScalarDB.Net.Client.Core; + +// ... + +await _myDbContext.JoinTransactionAsync(transactionId, TransactionType.TwoPhaseCommit); +``` + +### Resume an existing transaction + +```c# +await _myDbContext.ResumeTransaction(transactionId); +``` + +### Resume an existing transaction with the two-phase commit interface + +```c# +await _myDbContext.ResumeTransaction(transactionId, TransactionType.TwoPhaseCommit); +``` + +{% capture notice--info %} +**Note** + +The `ResumeTransaction` method doesn't have an asynchronous version because it only initializes the transaction data in the `ScalarDbContext` inheriting object without querying the cluster. Because of this, resuming a transaction by using the wrong ID is possible. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +### Commit a transaction (ordinary or two-phase commit) + +```c# +await _myDbContext.CommitTransactionAsync(); +``` + +### Roll back a transaction (ordinary or two-phase commit) + +```c# +await _myDbContext.RollbackTransactionAsync(); +``` + +### Prepare a transaction with the two-phase commit interface for the commit + +```c# +await _myDbContext.PrepareTransactionAsync(); +``` + +### Validate a transaction with the two-phase commit interface before the commit + +```c# +await _myDbContext.ValidateTransactionAsync(); +``` diff --git a/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-scalardb-tables-as-csharp-classes.md b/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-scalardb-tables-as-csharp-classes.md new file mode 100644 index 00000000..62c6c376 --- /dev/null +++ b/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-scalardb-tables-as-csharp-classes.md @@ -0,0 +1,184 @@ +# Getting Started with Tables as C# Classes in the ScalarDB Cluster .NET Client SDK + +The ScalarDB Cluster .NET Client SDK helps you write code to access a cluster by abstracting ScalarDB tables as C# objects. After defining a class that represents a table in the cluster, you can ensure that a column name or its type won't be mixed up when querying the cluster. In addition, if a table's structure changes, you can apply the changes to the code by using the refactoring feature in your IDE. + +{% capture notice--info %} +**Note** + +Although we recommend using asynchronous methods, as in the following examples, you can use synchronous methods instead. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +## Create classes for all ScalarDB tables + +To work with ScalarDB tables as C# objects, you must create a class for each table that you want to use. For example: + +```c# +using System.ComponentModel.DataAnnotations.Schema; +using ScalarDB.Net.Client.DataAnnotations; + +// ... + +[Table("ns.statements")] +public class Statement +{ + [Column("order_id", Order = 0), PartitionKey] + public string OrderId { get; set; } = String.Empty; + + [Column("item_id", Order = 1), ClusteringKey] + public int ItemId { get; set; } + + [Column("count", Order = 2)] + public int Count { get; set; } +} +``` + +## Execute CRUD operations + +After creating a class for each table, you can use the classes as objects by using the generic `GetAsync`, `ScanAsync`, `InsertAsync`, `UpdateAsync`, `DeleteAsync`, `PutAsync`, or `MutateAsync` method of `IDistributedTransaction` (or more specifically, of `ITransactionCrudOperable`). + +To use these generic methods, add the following namespace to the `using` section: + +```c# +using ScalarDB.Net.Client.Extensions; +``` + +### Get one object by using the `GetAsync` method + +```c# +var keys = new Dictionary + { + { nameof(Statement.OrderId), "1" } + }; +var statement = await transaction.GetAsync(keys); + +Console.WriteLine($"ItemId: {statement.ItemId}, Count: {statement.Count}"); +``` + +### Get multiple objects by using the `ScanAsync` method + +```c# +var startKeys = new Dictionary + { + { nameof(Statement.OrderId), "1" }, + { nameof(Statement.ItemId), 3 } + }; +var endKeys = new Dictionary + { + { nameof(Statement.ItemId), 6} + }; +var statements = await transaction.ScanAsync(startKeys, endKeys); + +foreach (var s in statements) + Console.WriteLine($"ItemId: {s.ItemId}, Count: {s.Count}"); +``` + +### Insert a new object by using the `InsertAsync` method + +```c# +var statement = new Statement + { + OrderId = "2", + ItemId = 4, + Count = 8 + }; +await transaction.InsertAsync(statement); +``` + +### Update an object by using the `UpdateAsync` method + +```c# +// ... +statement.ItemId = 4; +statement.Count = 8; + +await transaction.UpdateAsync(statement); +``` + +### Delete an object by using the `DeleteAsync` method + +```c# +// ... +await transaction.DeleteAsync(statement); +``` + +### Upsert an object by using the `PutAsync` method + +```c# +var statement = new Statement + { + OrderId = "2", + ItemId = 4, + Count = 8 + }; +await transaction.PutAsync(statement); +``` + +### Put and delete multiple objects at once by using the `MutateAsync` method + +```c# +var statement = new Statement + { + OrderId = "2", + ItemId = 4, + Count = 16 + }; + +// ... + +await client.MutateAsync(objectsToPut: new[] { statement }, + objectsToDelete: new[] { statement2 }); +``` + +{% capture notice--info %} +**Note** + +To modify objects by using the `UpdateAsync`, `DeleteAsync`, `PutAsync`, or `MutateAsync` method, the objects must be retrieved first by using the `GetAsync` or `ScanAsync` method. +{% endcapture %} + +
{{ notice--info | markdownify }}
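+
+Putting these operations together, a typical read-modify-write flow with a `Statement` object might look like the following sketch. The transaction manager and transaction are obtained as described in [Getting Started with Distributed Transactions in the ScalarDB Cluster .NET Client SDK](getting-started-with-distributed-transactions.md), and the generic type arguments (for example, `GetAsync<Statement>`) and the `Dictionary<string, object>` key type are assumptions based on the generic methods described above:
+
+```c#
+var transaction = await manager.BeginAsync();
+
+// Retrieve the object first, as required before modifying it.
+var keys = new Dictionary<string, object>
+    {
+        { nameof(Statement.OrderId), "1" },
+        { nameof(Statement.ItemId), 2 }
+    };
+var statement = await transaction.GetAsync<Statement>(keys);
+
+// Modify the object, write it back, and commit.
+statement.Count += 1;
+await transaction.UpdateAsync(statement);
+
+await transaction.CommitAsync();
+```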
+ +## Use the Administrative API + +C# objects also can be used with the Administrative API. To use generic Administrative API methods, add the following namespace to the `using` section: + +```c# +using ScalarDB.Net.Client.Extensions; +``` + +### Create a new namespace + +```c# +await admin.CreateNamespaceAsync(); +``` + +### Drop an existing namespace + +```c# +await admin.DropNamespaceAsync(); +``` + +### Check if a namespace exists + +```c# +var namespaceExists = await admin.IsNamespacePresentAsync(); +``` + +### Create a new table + +```c# +await admin.CreateTableAsync(); +``` + +### Drop an existing table + +```c# +await admin.DropTableAsync(); +``` + +### Check if a table exists + +```c# +var tableExists = await admin.IsTablePresentAsync(); +``` diff --git a/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-two-phase-commit-transactions.md b/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-two-phase-commit-transactions.md new file mode 100644 index 00000000..30138526 --- /dev/null +++ b/docs/3.12/scalardb-cluster-dotnet-client-sdk/getting-started-with-two-phase-commit-transactions.md @@ -0,0 +1,120 @@ +# Getting Started with Distributed Transactions with a Two-Phase Commit Interface in the ScalarDB Cluster .NET Client SDK + +The ScalarDB Cluster .NET Client SDK supports transactions with the two-phase commit interface in ScalarDB. The SDK includes transaction and manager abstractions for enhanced communication within a cluster. + +{% capture notice--info %} +**Note** + +Although we recommend using asynchronous methods as in the following examples, you can use synchronous methods instead. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +## About transactions with the two-phase commit interface + +By using the SDK, you can execute transactions with the two-phase commit interface that span multiple applications. For example, if you have multiple microservices, you can create a transaction manager in each of them and execute a transaction that spans those microservices. + +In transactions with the two-phase commit interface, there are two roles—coordinator and a participant—that collaboratively execute a single transaction. + +The coordinator process first begins a transaction and sends the ID of the transaction to all the participants, and the participant processes join the transaction. After executing CRUD or SQL operations, the coordinator process and the participant processes commit the transaction by using the two-phase interface. + +## Get a transaction manager (for coordinator and participants) + +First, you need to get a transaction manager for distributed transactions with the two-phase commit interface. To get the transaction manager, you can use `TransactionFactory` as follows, replacing `` with the FQDN or the IP address, and `` with the port number (`60053` by default) of your cluster: + +```c# +var scalarDbOptions = new ScalarDbOptions + { + Address = "http://:", + HopLimit = 10 + }; +var factory = TransactionFactory.Create(scalarDbOptions); + +using var manager = factory.GetTwoPhaseCommitTransactionManager(); +``` + +Alternatively, you can use SQL instead of CRUD operations for transactions with the two-phase commit interface by specifying the following transaction manager: + +```c# +using var manager = factory.GetSqlTwoPhaseCommitTransactionManager(); +``` + +## Begin a transaction (for coordinator) + +You can begin a transaction with the two-phase commit interface in the coordinator as follows: + +```c# +var transaction = await manager.BeginAsync(); +``` + +The ID of the started transaction can be obtained with the following code: + +```c# +var transactionId = transaction.Id; +``` + +## Join a transaction (for participants) + +You can join a transaction with the two-phase commit interface in a participant as follows: + +```c# +var transaction = await manager.JoinAsync(transactionId); +``` + +## Resume a transaction (for coordinator and participants) + +Usually, a transaction with the two-phase commit interface involves multiple request and response exchanges. In scenarios where you need to work with a transaction that has been begun or joined in the previous request, you can resume such transaction as follows: + +```c# +var transaction = manager.Resume(transactionId); +``` + +{% capture notice--info %} +**Note** + +The `Resume` method doesn't have an asynchronous version because it only creates a transaction object. Because of this, resuming a transaction by using the wrong ID is possible. +{% endcapture %} + +
{{ notice--info | markdownify }}
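+
+Putting the steps described in the following sections together, the coordinator side of a transaction might look like the following sketch. How the transaction ID is shared with the participants is application specific, and error handling is simplified:
+
+```c#
+var transaction = await manager.BeginAsync();
+
+// Share transaction.Id with the participant services here so that
+// they can join the transaction by calling JoinAsync(transaction.Id).
+
+// ... execute CRUD or SQL operations in the coordinator and the participants ...
+
+try
+{
+    // Prepare and, if required by the concurrency control protocol,
+    // validate the transaction; the participants must do the same.
+    await transaction.PrepareAsync();
+    await transaction.ValidateAsync();
+}
+catch
+{
+    // If preparation or validation fails anywhere, roll back
+    // in the coordinator and all the participants.
+    await transaction.RollbackAsync();
+    throw;
+}
+
+// Commit in the coordinator and all the participants.
+await transaction.CommitAsync();
+```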
+ +## Roll back a transaction + +If a transaction fails to commit, you can roll back the transaction as follows: + +```c# +await transaction.RollbackAsync(); +``` + +## Commit a transaction (for coordinator and participants) + +After completing CRUD or SQL operations, you must commit the transaction. However, for transactions with the two-phase commit interface, you must prepare the transaction in the coordinator and all the participants first. + +```c# +await transaction.PrepareAsync(); +``` + +Next, depending on the concurrency control protocol, you may need to validate the transaction in the coordinator and all the participants as follows: + +```c# +await transaction.ValidateAsync(); +``` + +Finally, you can commit the transaction in the coordinator and all the participants as follows: + +```c# +await transaction.CommitAsync(); +``` + +If the coordinator or any of the participants failed to prepare or validate the transaction, you will need to call `RollbackAsync` in the coordinator and all the participants. + +In addition, if the coordinator and all the participants failed to commit the transaction, you will need to call `RollbackAsync` in the coordinator and all the participants. + +However, if the coordinator or only some of the participants failed to commit the transaction, the transaction will be regarded as committed as long as the coordinator or any one of the participants has succeeded in committing the transaction. + +## Execute CRUD operations + +The two-phase commit interface of the transaction has the same methods for CRUD operations as ordinary transactions. For details, see [Execute CRUD operations](getting-started-with-distributed-transactions.md#execute-crud-operations). + +## Execute SQL statements + +The two-phase commit interface of the SQL transaction has the same methods for executing SQL queries as ordinary SQL transactions. For details, see [Execute SQL queries](getting-started-with-distributed-sql-transactions.md#execute-sql-queries). diff --git a/docs/3.12/scalardb-cluster-dotnet-client-sdk/index.md b/docs/3.12/scalardb-cluster-dotnet-client-sdk/index.md new file mode 100644 index 00000000..c6959684 --- /dev/null +++ b/docs/3.12/scalardb-cluster-dotnet-client-sdk/index.md @@ -0,0 +1,12 @@ +# Getting Started with the ScalarDB Cluster .NET Client SDK + +The ScalarDB Cluster .NET Client SDK is a .NET Standard 2.0 library that can be used with various .NET versions. For details about .NET Standard and its versions, see [.NET Standard](https://dotnet.microsoft.com/en-us/platform/dotnet-standard). 
+ +* [Getting Started with Distributed Transactions](getting-started-with-distributed-transactions.md) +* [Getting Started with Distributed SQL Transactions](getting-started-with-distributed-sql-transactions.md) +* [Getting Started with the Administrative API](getting-started-with-admin-api.md) +* [Getting Started with ScalarDB Tables as C# Classes](getting-started-with-scalardb-tables-as-csharp-classes.md) +* [Getting Started with ASP.NET Core and Dependency Injection](getting-started-with-aspnet-and-di.md) +* [Getting Started with LINQ](getting-started-with-linq.md) +* [Getting Started with Distributed Transactions with a Two-Phase Commit Interface](getting-started-with-two-phase-commit-transactions.md) +* [Getting Started with ScalarDB Auth](getting-started-with-auth.md) diff --git a/docs/3.12/scalardb-cluster/developer-guide-for-scalardb-cluster-with-java-api.md b/docs/3.12/scalardb-cluster/developer-guide-for-scalardb-cluster-with-java-api.md new file mode 100644 index 00000000..b09fe6da --- /dev/null +++ b/docs/3.12/scalardb-cluster/developer-guide-for-scalardb-cluster-with-java-api.md @@ -0,0 +1,274 @@ +# Developer Guide for ScalarDB Cluster with the Java API + +ScalarDB Cluster provides a Java API for developing applications. +This document explains how to use the Java API. + +## Add ScalarDB Cluster Java Client SDK to your build + +The ScalarDB Cluster Java Client SDK is available in the [Maven Central Repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb-cluster-java-client-sdk). + +To add a dependency on the ScalarDB Cluster Java Client SDK by using Gradle, use the following: + +```gradle +dependencies { + implementation 'com.scalar-labs:scalardb-cluster-java-client-sdk:3.12.0' +} +``` + +To add a dependency by using Maven, use the following: + +```xml + + com.scalar-labs + scalardb-cluster-java-client-sdk + 3.12.0 + +``` + +## Client modes + +The ScalarDB Cluster Java Client SDK supports two client modes: `indirect` and `direct-kubernetes`. The following describes the client modes. + +### `indirect` client mode + +This mode simply sends a request to any cluster node (typically via a load balancer, such as Envoy), and the cluster node receiving the request routes the request to the appropriate cluster node that has the transaction state. + +![ScalarDB Cluster architecture](images/indirect-client-mode.png) + +The advantage of this mode is that we can keep the client thin. +The disadvantage is that we need an additional hop to reach the correct cluster node, which may affect performance. + +You can use this connection mode even if your application is running on a different Kubernetes cluster and your application can't access the Kubernetes API and each cluster node. +If your application is running on the same Kubernetes cluster as your ScalarDB Cluster nodes, you can use the `direct-kubernetes` client mode. + +### `direct-kubernetes` client mode + +In this mode, the client uses the membership logic (using the Kubernetes API) and the distribution logic (consistent hashing algorithm) to find the right cluster node that has the transaction state. +The client then sends a request to the cluster node directly. + +![ScalarDB Cluster architecture](images/direct-kubernetes-client-mode.png) + +The advantage of this mode is that we can reduce the hop count to reach the proper cluster node, which will improve the performance. +The disadvantage of this mode is that we need to make the client fat because the client needs to have membership logic and request-routing logic. 
+ +Since this connection mode needs to access the Kubernetes API and each cluster node, you can use this connection mode only if your application is running on the same Kubernetes cluster as your ScalarDB Cluster nodes. +If your application is running on a different Kubernetes cluster, use the `indirect` client mode. + +For details about how to deploy your application on Kubernetes with `direct-kubernetes` client mode, see [Deploy your client application on Kubernetes with `direct-kubernetes` mode](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalardb-cluster.md#deploy-your-client-application-on-kubernetes-with-direct-kubernetes-mode). + +## ScalarDB Cluster Java API + +The ScalarDB Cluster Java Client SDK provides a Java API for applications to access ScalarDB Cluster. The following diagram shows the architecture of the ScalarDB Cluster Java API. + +``` + +------------------+ + | User/Application | + +------------------+ + ↓ Java API + +--------------+ + | ScalarDB API | + +--------------+ + ↓ gRPC + +------------------+ + | ScalarDB Cluster | + +------------------+ + ↓ DB vendor–specific protocol + +----+ + | DB | + +----+ +``` + +Using the ScalarDB Cluster Java API is almost the same as using the ScalarDB Java API except the client configurations and Schema Loader are different. +For details, see [ScalarDB Java API Guide](https://github.com/scalar-labs/scalardb/blob/master/docs/api-guide.md). + +The following section describes the client configurations for the ScalarDB Cluster Java API and Schema Loader for Cluster. + +### Client configurations + +The following table shows the client configurations for the ScalarDB Cluster Java API. + +| Name | Description | Default | +|----------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------| +| `scalar.db.transaction_manager` | `cluster` should be specified. | - | +| `scalar.db.contact_points` | Contact point of the cluster. If you use the `indirect` client mode, specify the IP address of the load balancer in front of your cluster nodes by using the format `indirect:`. If you use the `direct-kubernetes` client mode, specify the namespace name (optional) and the name of the [endpoint resource](https://kubernetes.io/docs/concepts/services-networking/service/#endpoints) to get the membership information by using the format `direct-kubernetes:/` or just `direct-kubernetes:`. If you don't specify the namespace name, the client will use the `default` namespace. | | +| `scalar.db.cluster.grpc.deadline_duration_millis` | Deadline duration for gRPC in millis. | `60000` (60 seconds) | +| `scalar.db.cluster.grpc.max_inbound_message_size` | Maximum message size allowed for a single gRPC frame. | The gRPC default value | +| `scalar.db.cluster.grpc.max_inbound_metadata_size` | Maximum size of metadata allowed to be received. 
| The gRPC default value | + +For example, if you use the `indirect` client mode and the load balancer IP address is `192.168.10.1`, you can configure the client as follows: + +```properties +scalar.db.transaction_manager=cluster +scalar.db.contact_points=indirect:192.168.10.1 +``` + +Or if you use the `direct-kubernetes` client mode, with the namespace of the endpoint as `ns` and the endpoint name as `scalardb-cluster`, you can configure the client as follows: + +```properties +scalar.db.transaction_manager=cluster +scalar.db.contact_points=direct-kubernetes:ns/scalardb-cluster +``` + +### Schema Loader for Cluster + +To load a schema via ScalarDB Cluster, you need to use the dedicated Schema Loader for ScalarDB Cluster (Schema Loader for Cluster). +Using the Schema Loader for Cluster is basically the same as using the [ScalarDB Schema Loader](https://github.com/scalar-labs/scalardb/blob/master/docs/schema-loader.md) except the name of the JAR file is different. +You can download the Schema Loader for Cluster at [Releases](https://github.com/scalar-labs/scalardb-cluster/releases/tag/v3.12.0). +After downloading the JAR file, you can run Schema Loader for Cluster with the following command: + +```shell +java -jar scalardb-cluster-schema-loader-3.12.0-all.jar --config -f --coordinator +``` + +## ScalarDB Cluster SQL + +ScalarDB Cluster SQL can be accessed via JDBC and Spring Data JDBC for ScalarDB in Java as follows: + +``` + +-----------------------------------------+ + | User/Application | + +-----------------------------------------+ + ↓ ↓ Java API +Java API ↓ +-------------------------------+ + (JDBC) ↓ | Spring Data JDBC for ScalarDB | + ↓ +-------------------------------+ ++----------------------------------------------+ +| ScalarDB JDBC (ScalarDB SQL) | ++----------------------------------------------+ + ↓ gRPC + +----------------------+ + | ScalarDB Cluster SQL | + +----------------------+ + ↓ DB vendor–specific protocol + +----+ + | DB | + +----+ +``` + +This section describes how to use ScalarDB Cluster SQL though JDBC and Spring Data JDBC for ScalarDB. + +### ScalarDB Cluster SQL via JDBC + +Using ScalarDB Cluster SQL via JDBC is almost the same using [ScalarDB JDBC](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/jdbc-guide.md) except for how to add the JDBC driver to your project. + +In addition to adding the ScalarDB Cluster Java Client SDK as described in [Add ScalarDB Cluster Java Client SDK to your build](#add-scalardb-cluster-java-client-sdk-to-your-build), you need to add the following dependencies to your project: + +To add the dependencies on the ScalarDB Cluster JDBC driver by using Gradle, use the following: + +```gradle +dependencies { + implementation 'com.scalar-labs:scalardb-sql-jdbc:3.12.0' + implementation 'com.scalar-labs:scalardb-cluster-java-client-sdk:3.12.0' +} +``` + +To add the dependencies by using Maven, use the following: + +```xml + + + com.scalar-labs + scalardb-sql-jdbc + 3.12.0 + + + com.scalar-labs + scalardb-cluster-java-client-sdk + 3.12.0 + + +``` + +Other than that, using ScalarDB Cluster SQL via JDBC is the same as using ScalarDB JDBC. +For details about ScalarDB JDBC, see [ScalarDB JDBC Guide](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/jdbc-guide.md). 
+ +### ScalarDB Cluster SQL via Spring Data JDBC for ScalarDB + +Similar to ScalarDB Cluster SQL via JDBC, using ScalarDB Cluster SQL via Spring Data JDBC for ScalarDB is almost the same as using [Spring Data JDBC for ScalarDB](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/spring-data-guide.md) except for how to add it to your project. + +In addition to adding the ScalarDB Cluster Java Client SDK as described in [Add ScalarDB Cluster Java Client SDK to your build](#add-scalardb-cluster-java-client-sdk-to-your-build), you need to add the following dependencies to your project: + +To add the dependencies by using Gradle, use the following: + +```gradle +dependencies { + implementation 'com.scalar-labs:scalardb-sql-spring-data:3.12.0' + implementation 'com.scalar-labs:scalardb-cluster-java-client-sdk:3.12.0' +} +``` + +To add the dependencies by using Maven, use the following: + +```xml + + + com.scalar-labs + scalardb-sql-spring-data + 3.12.0 + + + com.scalar-labs + scalardb-cluster-java-client-sdk + 3.12.0 + + +``` + +Other than that, using ScalarDB Cluster SQL via Spring Data JDBC for ScalarDB is the same as using Spring Data JDBC for ScalarDB. +For details about Spring Data JDBC for ScalarDB, see [Guide of Spring Data JDBC for ScalarDB](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/spring-data-guide.md). + +### ScalarDB Cluster SQL client configurations + +The following table shows the configurations for ScalarDB Cluster SQL. + +| Name | Description | Default | +|----------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------| +| `scalar.db.sql.connection_mode` | `cluster` should be specified. | - | +| `scalar.db.sql.cluster_mode.contact_points` | Contact point of the cluster. If you use the `indirect` client mode, specify the IP address of the load balancer in front of your cluster nodes by using the format `indirect:`. If you use the `direct-kubernetes` client mode, specify the namespace name (optional) and the name of the [endpoint resource](https://kubernetes.io/docs/concepts/services-networking/service/#endpoints) to get the membership information by using the format `direct-kubernetes:/` or just `direct-kubernetes:`. If you don't specify the namespace name, the client will use the `default` namespace. | | +| `scalar.db.cluster.grpc.deadline_duration_millis` | Deadline duration for gRPC in millis. | `60000` (60 seconds) | +| `scalar.db.cluster.grpc.max_inbound_message_size` | Maximum message size allowed for a single gRPC frame. | The gRPC default value | +| `scalar.db.cluster.grpc.max_inbound_metadata_size` | Maximum size of metadata allowed to be received. 
| The gRPC default value | + +For example, if you use the `indirect` client mode and the load balancer IP address is `192.168.10.1`, you can configure the client as follows: + +```properties +scalar.db.sql.connection_mode=cluster +scalar.db.sql.cluster_mode.contact_points=indirect:192.168.10.1 +``` + +Or if you use the `direct-kubernetes` client mode, with the namespace of the endpoint as `ns` and the endpoint name as `scalardb-cluster`, you can configure the client as follows: + +```properties +scalar.db.sql.connection_mode=cluster +scalar.db.sql.cluster_mode.contact_points=direct-kubernetes:ns/scalardb-cluster +``` + +For details about how to configure ScalarDB JDBC, see [JDBC connection URL](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/jdbc-guide.md#jdbc-connection-url). + +For details about how to configure Spring Data JDBC for ScalarDB, see [Configurations](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/spring-data-guide.md#configurations). + +### SQL CLI for Cluster + +You need to use the dedicated SQL CLI for ScalarDB Cluster (SQL CLI for Cluster). + +Using the SQL CLI for Cluster is basically the same as using the [ScalarDB SQL Command Line Interface](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/command-line-interface.md) except the name of the JAR file is different. +You can download the SQL CLI for Cluster from [Releases](https://github.com/scalar-labs/scalardb-cluster/releases/tag/v3.12.0). +After downloading the JAR file, you can run SQL CLI for Cluster with the following command: + +```shell +java -jar scalardb-cluster-sql-cli-3.12.0-all.jar --config +``` + +## Further reading + +If you want to use ScalarDB Cluster in programming languages other than Java, you can use the ScalarDB Cluster gRPC API. +For details about the ScalarDB Cluster gRPC API, refer to the following: + +* [ScalarDB Cluster gRPC API Guide](scalardb-cluster-grpc-api-guide.md) +* [ScalarDB Cluster SQL gRPC API Guide](scalardb-cluster-sql-grpc-api-guide.md) + +JavaDocs are also available: + +* [ScalarDB Cluster Java Client SDK](https://javadoc.io/doc/com.scalar-labs/scalardb-cluster-java-client-sdk/3.12.0/index.html) +* [ScalarDB Cluster Common](https://javadoc.io/doc/com.scalar-labs/scalardb-cluster-common/3.12.0/index.html) +* [ScalarDB Cluster RPC](https://javadoc.io/doc/com.scalar-labs/scalardb-cluster-rpc/3.12.0/index.html) diff --git a/docs/3.12/scalardb-cluster/getting-started-with-scalardb-cluster-graphql.md b/docs/3.12/scalardb-cluster/getting-started-with-scalardb-cluster-graphql.md new file mode 100644 index 00000000..43b52d59 --- /dev/null +++ b/docs/3.12/scalardb-cluster/getting-started-with-scalardb-cluster-graphql.md @@ -0,0 +1,336 @@ +# Getting Started with ScalarDB Cluster GraphQL + +This tutorial describes how to use ScalarDB Cluster GraphQL. + +## Prerequisites + +- One of the following Java Development Kits (JDKs): + - [Oracle JDK](https://www.oracle.com/java/technologies/downloads/) LTS version (8, 11, or 17) + - [OpenJDK](https://openjdk.org/install/) LTS version (8, 11, or 17) +- ScalarDB Cluster running on a Kubernetes cluster + - We assume that you have a ScalarDB Cluster running on a Kubernetes cluster that you deployed by following the instructions in [Set Up ScalarDB Cluster on Kubernetes by Using a Helm Chart](setup-scalardb-cluster-on-kubernetes-by-using-helm-chart.md). + +{% capture notice--info %} +**Note** + +We recommend using the LTS versions mentioned above, but other non-LTS versions may work. 
+ +In addition, other JDKs should work with ScalarDB, but we haven't tested them. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +## Sample application + +This tutorial illustrates the process of creating an electronic money application, where money can be transferred between accounts. + +The following diagram shows the system architecture of the sample application: + +``` + +----------------------------------------------------------------------------------------------------------------------------------------+ + | [Kubernetes Cluster] | + | | + | [Pod] [Pod] [Pod] | + | | + | +-------+ | + | +---> | Envoy | ---+ | + | | +-------+ | | + | | | | + +------------------------+ | +---------+ | +-------+ | +--------------------+ | + | Schema Loader | --+-> | Service | ---+---> | Envoy | ---+---------> | Service | ---+ | + | (indirect client mode) | | | (Envoy) | | +-------+ | | (ScalarDB Cluster) | | | + +------------------------+ | +---------+ | | +--------------------+ | +-----------------------+ | + | | +-------+ | | +---> | ScalarDB Cluster Node | ---+ | + | +---> | Envoy | ---+ | | +-----------------------+ | | + | +-------+ | | | | + | | | +-----------------------+ | +------------+ | + | +---+---> | ScalarDB Cluster Node | ---+---> | PostgreSQL | | + | | | +-----------------------+ | +------------+ | + | | | | | + | | | +-----------------------+ | | + | | +---> | ScalarDB Cluster Node | ---+ | + | | +-----------------------+ | + +------------+ | +----------------------------+ | | + | Browser | ------+---------------------------------------> | Service | ---+ | + | (GraphiQL) | | | (ScalarDB Cluster GraphQL) | | + +------------+ | +----------------------------+ | + | | + +----------------------------------------------------------------------------------------------------------------------------------------+ +``` + +## Step 1. Create `schema.json` + +The following is a simple example schema. + +Create `schema.json`, and add the following to the file: + +```json +{ + "emoney.account": { + "transaction": true, + "partition-key": [ + "id" + ], + "clustering-key": [], + "columns": { + "id": "TEXT", + "balance": "INT" + } + } +} +``` + +## Step 2. Create `database.properties` + +You need to create `database.properties` for the Schema Loader for ScalarDB Cluster. +But first, you need to get the `EXTERNAL-IP` address of the service resource of Envoy (`scalardb-cluster-envoy`). + +To see the `EXTERNAL-IP` address, run the following command: + +```shell +$ kubectl get svc scalardb-cluster-envoy +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +scalardb-cluster-envoy LoadBalancer 10.105.121.51 localhost 60053:30641/TCP 16h +``` + +In this case, the `EXTERNAL-IP` address is `localhost`. + +Then, create `database.properties`, and add the following to the file: + +```properties +scalar.db.transaction_manager=cluster +scalar.db.contact_points=indirect:localhost +``` + +To connect to ScalarDB Cluster, you need to specify `cluster` for the `scalar.db.transaction_manager` property. +In addition, you will use the `indirect` client mode and connect to the service resource of Envoy in this tutorial. +For details about the client modes, see [Developer Guide for ScalarDB Cluster with the Java API](developer-guide-for-scalardb-cluster-with-java-api.md). + +## Step 3. Load a schema + +To load a schema via ScalarDB Cluster, you need to use the dedicated Schema Loader for ScalarDB Cluster (Schema Loader for Cluster). +Using the Schema Loader for Cluster is basically the same as using the [Schema Loader for ScalarDB](https://github.com/scalar-labs/scalardb/blob/master/docs/schema-loader.md) except the name of the JAR file is different. 
+You can download the Schema Loader for Cluster at [Releases](https://github.com/scalar-labs/scalardb-cluster/releases/tag/v3.12.0). +After downloading the JAR file, you can run the Schema Loader for Cluster with the following command: + +```shell +$ java -jar scalardb-cluster-schema-loader-3.12.0-all.jar --config database.properties -f schema.json --coordinator +``` + +## Step 4. Run operations from GraphiQL + +In ScalarDB Cluster, if the `scalar.db.graphql.graphiql` property is set to `true` (`true` is the default value), the GraphiQL IDE will be available. + +To get the `EXTERNAL-IP` address of the service resource of ScalarDB Cluster GraphQL (`scalardb-cluster-graphql`), run the following command: + +```shell +$ kubectl get svc scalardb-cluster-graphql +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +scalardb-cluster-graphql LoadBalancer 10.105.74.214 localhost 8080:30514/TCP 16h +``` + +In this case, the `EXTERNAL-IP` address is `localhost`, and the endpoint URL of GraphiQL IDE is `http://localhost:8080/graphql`. +Opening that URL with your web browser will take you to the GraphiQL screen. + +Let's insert the first record. +In the left pane, paste the following mutation, then push the triangle-shaped `Execute Query` button at the top of the window. + +```graphql +mutation PutUser1 { + account_put(put: {key: {id: "user1"}, values: {balance: 1000}}) +} +``` + +ScalarDB GraphQL always runs queries with transactions. +The above query starts a new transaction, executes a ScalarDB Put command, and commits the transaction at the end of the execution. + +The following response from the GraphQL server will appear in the right pane: + +```json +{ + "data": { + "account_put": true + } +} +``` + +The `"data"` field contains the result of the execution. +This response shows the `account_put` field of the mutation was successful. +The result type of mutations is `Boolean!`, which indicates whether the operation succeeded or not. + +Next, let's get the record you just inserted. +Paste the following query next to the previous mutation in the left pane, and click the `Execute Query` button. +Since you don't delete the `mutation PutUser1` above, a pull-down menu will appear below the button, and you can choose which operation should be executed. Choose `GetUser1`, as shown below: + +```graphql +query GetUser1 { + account_get(get: {key: {id: "user1"}}) { + account { + id + balance + } + } +} +``` + +You should get the following result in the right pane: + +```json +{ + "data": { + "account_get": { + "account": { + "id": "user1", + "balance": 1000 + } + } + } +} +``` + +### Mappings between GraphQL API and ScalarDB Java API + +The automatically generated GraphQL schema defines queries, mutations, and object types for input/output to allow you to run CRUD operations for all the tables in the specified namespaces. +These operations are designed to match the ScalarDB APIs defined in the [`DistributedTransaction`](https://javadoc.io/doc/com.scalar-labs/scalardb/latest/com/scalar/db/api/DistributedTransaction.html) interface. 
+ +Assuming you have an `account` table in a namespace, the following queries and mutations will be generated: + +| ScalarDB API | GraphQL root type | GraphQL field | +|--------------------------------------------------------|-------------------|------------------------------------------------------------------------------------| +| `get(Get get)` | `Query` | `account_get(get: account_GetInput!): account_GetPayload` | +| `scan(Scan scan)` | `Query` | `account_scan(scan: account_ScanInput!): account_ScanPayload` | +| `put(Put put)` | `Mutation` | `account_put(put: account_PutInput!): Boolean!` | +| `put(java.util.List puts)` | `Mutation` | `account_bulkPut(put: [account_PutInput!]!): Boolean!` | +| `delete(Delete delete)` | `Mutation` | `account_delete(delete: account_DeleteInput!): Boolean!` | +| `delete(java.util.List deletes)` | `Mutation` | `account_bulkDelete(delete: [account_DeleteInput!]!): Boolean!` | +| `mutate(java.util.List mutations)` | `Mutation` | `account_mutate(put: [account_PutInput!]delete: [account_DeleteInput!]): Boolean!` | + +Note that the `scan` field is not generated for a table with no clustering key. +This is the reason why the `account_scan` field is not available in this electronic money sample application. + +You can see all generated GraphQL types in GraphiQL's Documentation Explorer (the `< Docs` link at the top-left corner). + +## Step 5. Run a transaction across multiple requests from GraphiQL + +Let's run a transaction that spans multiple GraphQL requests. + +The generated schema provides the `@transaction` directive that allows you to identify transactions. +You can use this directive with both queries and mutations. + +Before starting a transaction, you need to insert the necessary record with the following mutation: + +```graphql +mutation PutUser2 { + account_put(put: {key: {id: "user2"}, values: {balance: 1000}}) +} +``` + +### Start a transaction before running an operation + +Running the following to add a `@transaction` directive with no arguments to a query or mutation directs the execution to start a new transaction: + +```graphql +query GetAccounts @transaction { + user1: account_get(get: {key: {id: "user1"}}) { + account { balance } + } + user2: account_get(get: {key: {id: "user2"}}) { + account { balance } + } +} +``` + +After running the above command, you will get a result with a transaction ID in the `extensions` field. +The `id` value in the extensions is the transaction ID in which the operation in the request was run. +In this case, the following is the new ID of the transaction just started by the request: + +```json +{ + "data": { + "user1": { + "account": { + "balance": 1000 + } + }, + "user2": { + "account": { + "balance": 1000 + } + } + }, + "extensions": { + "transaction": { + "id": "c88da8a6-a13f-4857-82fe-45f1ab4150f9" + } + } +} +``` + +### Run an operation in a continued transaction + +To run the next queries or mutations in the transaction you started, specify the transaction ID as the `id` argument of the `@transaction`. 
+The following example updates two accounts you retrieved in the previous example by transferring a balance from user1's account to user2's account in the same transaction: + +```graphql +mutation Transfer @transaction(id: "c88da8a6-a13f-4857-82fe-45f1ab4150f9") { + user1: account_put(put: {key: {id: "user1"}, values: {balance: 750}}) + user2: account_put(put: {key: {id: "user2"}, values: {balance: 1250}}) +} +``` + +Note that a transaction started with GraphQL has a timeout of 1 minute (by default) and will be aborted automatically if it exceeds the timeout. + +### Commit a transaction + +To commit the continued transaction, specify both the `id` and the `commit: true` flag as arguments of the `@transaction` directive: + +```graphql +query GetAndCommit @transaction(id: "c88da8a6-a13f-4857-82fe-45f1ab4150f9", commit: true) { + user1: account_get(get: {key: {id: "user1"}}) { + account { balance } + } + user2: account_get(get: {key: {id: "user2"}}) { + account { balance } + } +} +``` + +**Note:** If you specify a `commit: true` flag without an `id` argument like `@transaction(commit: true)`, a new transaction will start and be committed just for one operation. +This behavior is exactly the same as not specifying the `@transaction` directive, as seen in the above examples using GraphiQL. +In other words, you can omit the directive itself when `@transaction(commit: true)` is specified. + +### Abort or roll back a transaction + +If you need to abort or roll back a transaction explicitly, you can use the `abort` or `rollback` mutation fields interchangeably (both have the same effect and usage). +Note that you cannot mix these fields with any other operations, so you must specify only the `abort` or `rollback` mutation field as follows: + +```graphql +mutation AbortTx @transaction(id: "c88da8a6-a13f-4857-82fe-45f1ab4150f9") { + abort +} +``` + +Or: + +```graphql +mutation RollbackTx @transaction(id: "c88da8a6-a13f-4857-82fe-45f1ab4150f9") { + rollback +} +``` + +## Next steps + +If you have not tried the other ScalarDB Cluster tutorials, we encourage you to read the following: + +* [Getting Started with ScalarDB Cluster](getting-started-with-scalardb-cluster.md) +* [Getting Started with ScalarDB Cluster SQL via JDBC](getting-started-with-scalardb-cluster-sql-jdbc.md) +* [Getting Started with ScalarDB Cluster SQL via Spring Data JDBC for ScalarDB](getting-started-with-scalardb-cluster-sql-spring-data-jdbc.md) + +For details about developing applications that use ScalarDB Cluster with the Java API, refer to the following: + +* [Developer Guide for ScalarDB Cluster with the Java API](developer-guide-for-scalardb-cluster-with-java-api.md) + +For details about ScalarDB Cluster gRPC API, refer to the following: + +* [ScalarDB Cluster gRPC API Guide](scalardb-cluster-grpc-api-guide.md) +* [ScalarDB Cluster SQL gRPC API Guide](scalardb-cluster-sql-grpc-api-guide.md) diff --git a/docs/3.12/scalardb-cluster/getting-started-with-scalardb-cluster-overview.md b/docs/3.12/scalardb-cluster/getting-started-with-scalardb-cluster-overview.md new file mode 100644 index 00000000..fb4177eb --- /dev/null +++ b/docs/3.12/scalardb-cluster/getting-started-with-scalardb-cluster-overview.md @@ -0,0 +1,14 @@ +--- +toc: false +--- + +# Getting Started with ScalarDB Cluster + +The following are tutorials for getting started with using ScalarDB Cluster: + +- [Getting Started with ScalarDB Cluster](getting-started-with-scalardb-cluster.md) +- [Getting Started with ScalarDB Cluster 
GraphQL](getting-started-with-scalardb-cluster-graphql.md) +- [Getting Started with ScalarDB Cluster SQL via JDBC](getting-started-with-scalardb-cluster-sql-jdbc.md) +- [Getting Started with ScalarDB Cluster SQL via Spring Data JDBC for ScalarDB](getting-started-with-scalardb-cluster-sql-spring-data-jdbc.md) +- [Getting Started with Using Python for ScalarDB Cluster](getting-started-with-using-python-for-scalardb-cluster.md) +- [Getting Started with Using Go for ScalarDB Cluster](getting-started-with-using-go-for-scalardb-cluster.md) diff --git a/docs/3.12/scalardb-cluster/getting-started-with-scalardb-cluster-sql-jdbc.md b/docs/3.12/scalardb-cluster/getting-started-with-scalardb-cluster-sql-jdbc.md new file mode 100644 index 00000000..4339c050 --- /dev/null +++ b/docs/3.12/scalardb-cluster/getting-started-with-scalardb-cluster-sql-jdbc.md @@ -0,0 +1,253 @@ +# Getting Started with ScalarDB Cluster SQL via JDBC + +This tutorial describes how to create a sample application by using ScalarDB Cluster SQL via JDBC. +You'll be using the same sample application as found in the [ScalarDB SQL (JDBC) Sample](https://github.com/scalar-labs/scalardb-samples/tree/main/scalardb-sql-jdbc-sample). + +## Prerequisites + +- One of the following Java Development Kits (JDKs): + - [Oracle JDK](https://www.oracle.com/java/technologies/downloads/) LTS version (8, 11, or 17) + - [OpenJDK](https://openjdk.org/install/) LTS version (8, 11, or 17) +- ScalarDB Cluster running on a Kubernetes cluster + - We assume that you have a ScalarDB Cluster running on a Kubernetes cluster that you deployed by following the instructions in [Set Up ScalarDB Cluster on Kubernetes by Using a Helm Chart](setup-scalardb-cluster-on-kubernetes-by-using-helm-chart.md). + +{% capture notice--info %} +**Note** + +We recommend using the LTS versions mentioned above, but other non-LTS versions may work. + +In addition, other JDKs should work with ScalarDB, but we haven't tested them. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +## Sample application + +This tutorial illustrates the process of creating a sample e-commerce application, where items can be ordered and paid for with a credit card by using ScalarDB. +For details about the sample application, see the [sample application for ScalarDB SQL (JDBC)](https://github.com/scalar-labs/scalardb-samples/tree/main/scalardb-sql-jdbc-sample#sample-application). + +The following diagram shows the system architecture of the sample application: + +``` + +------------------------------------------------------------------------------------------------------------------------------+ + | [Kubernetes Cluster] | + | | + | [Pod] [Pod] [Pod] | + +------------------------+ | | + | SQL CLI | | +-------+ +-----------------------+ | + | (indirect client mode) | --+ | +---> | Envoy | ---+ +---> | ScalarDB Cluster Node | ---+ | + +------------------------+ | | | +-------+ | | +-----------------------+ | | + | | | | | | | + | | +---------+ | +-------+ | +--------------------+ | +-----------------------+ | +------------+ | + +--+-> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | ScalarDB Cluster Node | ---+---> | PostgreSQL | | + | | | (Envoy) | | +-------+ | | (ScalarDB Cluster) | | +-----------------------+ | +------------+ | + +------------------------+ | | +---------+ | | +--------------------+ | | | + | Sample application | | | | +-------+ | | +-----------------------+ | | + | with ScalarDB JDBC | --+ | +---> | Envoy | ---+ +---> | ScalarDB Cluster Node | ---+ | + | (indirect client mode) | | +-------+ +-----------------------+ | + +------------------------+ | | + +------------------------------------------------------------------------------------------------------------------------------+ +``` + +## Step 1. Clone the ScalarDB Samples repository + +```shell +$ git clone https://github.com/scalar-labs/scalardb-samples.git +$ cd scalardb-samples/scalardb-sql-jdbc-sample +``` + +## Step 2. Modify `build.gradle` + +To use ScalarDB Cluster, you need to modify `build.gradle`: + +```shell +$ vim build.gradle +``` + +Then, delete the existing dependency for `com.scalar-labs:scalardb-sql-direct-mode:3.12.0` from the `dependencies` section, and add the following dependency to the `dependencies` section: + +```gradle +dependencies { + ... + + implementation 'com.scalar-labs:scalardb-cluster-java-client-sdk:3.12.0' +} +``` + +## Step 3. Modify `scalardb-sql.properties` + +You need to modify `scalardb-sql.properties` to connect to ScalarDB Cluster as well. +But before doing so, you need to get the `EXTERNAL-IP` address of the service resource of Envoy (`scalardb-cluster-envoy`) as follows: + +```shell +$ kubectl get svc scalardb-cluster-envoy +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +scalardb-cluster-envoy LoadBalancer 10.105.121.51 localhost 60053:30641/TCP 16h +``` + +In this case, the `EXTERNAL-IP` address is `localhost`. + +Next, open `scalardb-sql.properties`: + +```shell +$ vim scalardb-sql.properties +``` + +Then, modify `scalardb-sql.properties` as follows: + +```properties +scalar.db.sql.connection_mode=cluster +scalar.db.sql.cluster_mode.contact_points=indirect:localhost +``` + +To connect to ScalarDB Cluster, you need to specify `cluster` for the `scalar.db.sql.connection_mode` property. +In addition, you will use the `indirect` client mode and connect to the service resource of Envoy in this tutorial. +For details about the client modes, see [Developer Guide for ScalarDB Cluster with the Java API](developer-guide-for-scalardb-cluster-with-java-api.md). 
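+
+The sample application reads this configuration for you, but if you would like to see roughly how the same settings can be used from plain JDBC code, the following is a minimal, hypothetical sketch (not part of the sample application; the class name and query are assumptions for illustration) that passes the `cluster` connection mode and the `indirect:localhost` contact point through the JDBC URL, in the same way as `scalardb-sql.properties`, and reads one row from the `sample.customers` table that is created in the following steps:
+
+```java
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.Statement;
+
+public class ClusterSqlJdbcSketch {
+  public static void main(String[] args) throws Exception {
+    // Pass the same settings as scalardb-sql.properties through the JDBC URL.
+    String url =
+        "jdbc:scalardb:"
+            + "?scalar.db.sql.connection_mode=cluster"
+            + "&scalar.db.sql.cluster_mode.contact_points=indirect:localhost";
+
+    try (Connection connection = DriverManager.getConnection(url);
+        Statement statement = connection.createStatement();
+        ResultSet resultSet =
+            statement.executeQuery(
+                "SELECT customer_id, name, credit_limit, credit_total FROM sample.customers WHERE customer_id = 1")) {
+      while (resultSet.next()) {
+        System.out.println(resultSet.getString("name"));
+      }
+    }
+  }
+}
+```
+
+As with the sample application, the ScalarDB JDBC driver (`com.scalar.db.sql.jdbc.SqlJdbcDriver`) and the ScalarDB Cluster Java client SDK dependency added in Step 2 must be available on the classpath for a sketch like this to run.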
+ +## Step 4. Load a schema + +To load a schema via ScalarDB Cluster SQL, you need to use the dedicated SQL CLI for ScalarDB Cluster (SQL CLI for Cluster). +Using the SQL CLI for Cluster is basically the same as using the [ScalarDB SQL Command Line Interface](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/command-line-interface.md) except the name of the JAR file is different. +You can download the SQL CLI for Cluster from [Releases](https://github.com/scalar-labs/scalardb-cluster/releases/tag/v3.12.0). +After downloading the JAR file, you can use SQL CLI for Cluster by running the following command: + +```shell +$ java -jar scalardb-cluster-sql-cli-3.12.0-all.jar --config scalardb-sql.properties --file schema.sql +``` + +## Step 5. Load the initial data + +Before running the sample application, you need to load the initial data by running the following command: + +```shell +$ ./gradlew run --args="LoadInitialData" +``` + +After the initial data has loaded, the following records should be stored in the tables: + +- For the `sample.customers` table: + +| customer_id | name | credit_limit | credit_total | +|-------------|---------------|--------------|--------------| +| 1 | Yamada Taro | 10000 | 0 | +| 2 | Yamada Hanako | 10000 | 0 | +| 3 | Suzuki Ichiro | 10000 | 0 | + +- For the `sample.items` table: + +| item_id | name | price | +|---------|--------|-------| +| 1 | Apple | 1000 | +| 2 | Orange | 2000 | +| 3 | Grape | 2500 | +| 4 | Mango | 5000 | +| 5 | Melon | 3000 | + +## Step 6. Run the sample application + +Let's start with getting information about the customer whose ID is `1`: + +```shell +$ ./gradlew run --args="GetCustomerInfo 1" +... +{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 0} +... +``` + +Then, place an order for three apples and two oranges by using customer ID `1`. +Note that the order format is `:,:,...`: + +```shell +$ ./gradlew run --args="PlaceOrder 1 1:3,2:2" +... +{"order_id": "454f9c97-f456-44fd-96da-f527187fe39b"} +... +``` + +You can see that running this command shows the order ID. + +Let's check the details of the order by using the order ID: + +```shell +$ ./gradlew run --args="GetOrder 454f9c97-f456-44fd-96da-f527187fe39b" +... +{"order": {"order_id": "454f9c97-f456-44fd-96da-f527187fe39b","timestamp": 1685602722821,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 1, "name": "Apple", "price": 1000, "count": 3},{"item_id": 2, "name": "Orange", "price": 2000, "count": 2}],"total": 7000}} +... +``` + +Then, let's place another order and get the order history of customer ID `1`: + +```shell +$ ./gradlew run --args="PlaceOrder 1 5:1" +... +{"order_id": "3f40c718-59ec-48aa-a6fe-2fdaf12ad094"} +... +$ ./gradlew run --args="GetOrders 1" +... +{"order": [{"order_id": "454f9c97-f456-44fd-96da-f527187fe39b","timestamp": 1685602722821,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 1, "name": "Apple", "price": 1000, "count": 3},{"item_id": 2, "name": "Orange", "price": 2000, "count": 2}],"total": 7000},{"order_id": "3f40c718-59ec-48aa-a6fe-2fdaf12ad094","timestamp": 1685602811718,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 5, "name": "Melon", "price": 3000, "count": 1}],"total": 3000}]} +... +``` + +This order history is shown in descending order by timestamp. + +The customer's current `credit_total` is `10000`. +Since the customer has now reached their `credit_limit`, which was shown when retrieving their information, they cannot place anymore orders. 
+ +```shell +$ ./gradlew run --args="GetCustomerInfo 1" +... +{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 10000} +... +$ ./gradlew run --args="PlaceOrder 1 3:1,4:1" +... +java.lang.RuntimeException: Credit limit exceeded + at sample.Sample.placeOrder(Sample.java:184) + at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:32) + at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:8) + at picocli.CommandLine.executeUserObject(CommandLine.java:2041) + at picocli.CommandLine.access$1500(CommandLine.java:148) + at picocli.CommandLine$RunLast.executeUserObjectOfLastSubcommandWithSameParent(CommandLine.java:2461) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2453) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2415) + at picocli.CommandLine$AbstractParseResultHandler.execute(CommandLine.java:2273) + at picocli.CommandLine$RunLast.execute(CommandLine.java:2417) + at picocli.CommandLine.execute(CommandLine.java:2170) + at sample.command.SampleCommand.main(SampleCommand.java:35) +... +``` + +After making a payment, the customer will be able to place orders again. + +```shell +$ ./gradlew run --args="Repayment 1 8000" +... +$ ./gradlew run --args="GetCustomerInfo 1" +... +{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 2000} +... +$ ./gradlew run --args="PlaceOrder 1 3:1,4:1" +... +{"order_id": "fb71279d-88ea-4974-a102-0ec4e7d65e25"} +... +``` + +## Source code of the sample application + +To learn more about ScalarDB Cluster SQL JDBC, you can check the [source code of the sample application](https://github.com/scalar-labs/scalardb-samples/tree/main/scalardb-sql-jdbc-sample/src/main/java/sample). + +## Next steps + +If you have not tried the other ScalarDB Cluster tutorials, we encourage you to read the following: + +* [Getting Started with ScalarDB Cluster](getting-started-with-scalardb-cluster.md) +* [Getting Started with ScalarDB Cluster GraphQL](getting-started-with-scalardb-cluster-graphql.md) +* [Getting Started with ScalarDB Cluster SQL via Spring Data JDBC for ScalarDB](getting-started-with-scalardb-cluster-sql-spring-data-jdbc.md) + +For details about developing applications that use ScalarDB Cluster with the Java API, refer to the following: + +* [Developer Guide for ScalarDB Cluster with the Java API](developer-guide-for-scalardb-cluster-with-java-api.md) + +For details about the ScalarDB Cluster gRPC API, refer to the following: + +* [ScalarDB Cluster gRPC API Guide](scalardb-cluster-grpc-api-guide.md) +* [ScalarDB Cluster SQL gRPC API Guide](scalardb-cluster-sql-grpc-api-guide.md) diff --git a/docs/3.12/scalardb-cluster/getting-started-with-scalardb-cluster-sql-spring-data-jdbc.md b/docs/3.12/scalardb-cluster/getting-started-with-scalardb-cluster-sql-spring-data-jdbc.md new file mode 100644 index 00000000..07146494 --- /dev/null +++ b/docs/3.12/scalardb-cluster/getting-started-with-scalardb-cluster-sql-spring-data-jdbc.md @@ -0,0 +1,289 @@ +# Getting Started with ScalarDB Cluster SQL via Spring Data JDBC for ScalarDB + +This tutorial describes how to create a sample application by using ScalarDB Cluster SQL via Spring Data JDBC for ScalarDB. +You'll be using the same sample application as found in the [Sample application of Spring Data JDBC for ScalarDB](https://github.com/scalar-labs/scalardb-samples/tree/main/spring-data-sample). 
+ +## Prerequisites + +- One of the following Java Development Kits (JDKs): + - [Oracle JDK](https://www.oracle.com/java/technologies/downloads/) LTS version (8, 11, or 17) + - [OpenJDK](https://openjdk.org/install/) LTS version (8, 11, or 17) +- ScalarDB Cluster running on a Kubernetes cluster + - We assume that you have a ScalarDB Cluster running on a Kubernetes cluster that you deployed by following the instructions in [Set Up ScalarDB Cluster on Kubernetes by Using a Helm Chart](setup-scalardb-cluster-on-kubernetes-by-using-helm-chart.md). + +{% capture notice--info %} +**Note** + +We recommend using the LTS versions mentioned above, but other non-LTS versions may work. + +In addition, other JDKs should work with ScalarDB, but we haven't tested them. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +## Sample application + +This tutorial illustrates the process of creating a sample e-commerce application, where items can be ordered and paid for with a credit card by using ScalarDB. +For details about the sample application, see the [sample application for Spring Data JDBC for ScalarDB](https://github.com/scalar-labs/scalardb-samples/tree/main/spring-data-sample#sample-application). + +The following diagram shows the system architecture of the sample application: + +``` + +------------------------------------------------------------------------------------------------------------------------------+ + | [Kubernetes Cluster] | + | | + | [Pod] [Pod] [Pod] | + +------------------------+ | | + | SQL CLI | | +-------+ +-----------------------+ | + | (indirect client mode) | --+ | +---> | Envoy | ---+ +---> | ScalarDB Cluster Node | ---+ | + +------------------------+ | | | +-------+ | | +-----------------------+ | | + | | | | | | | + | | +---------+ | +-------+ | +--------------------+ | +-----------------------+ | +------------+ | + +--+-> | Service | ---+---> | Envoy | ---+---> | Service | ---+---> | ScalarDB Cluster Node | ---+---> | PostgreSQL | | + +------------------------+ | | | (Envoy) | | +-------+ | | (ScalarDB Cluster) | | +-----------------------+ | +------------+ | + | Sample application | | | +---------+ | | +--------------------+ | | | + | with Spring Data JDBC | | | | +-------+ | | +-----------------------+ | | + | for ScalarDB | --+ | +---> | Envoy | ---+ +---> | ScalarDB Cluster Node | ---+ | + | (indirect client mode) | | +-------+ +-----------------------+ | + +------------------------+ | | + +------------------------------------------------------------------------------------------------------------------------------+ +``` + +## Step 1. Clone the ScalarDB Samples repository + +```shell +$ git clone https://github.com/scalar-labs/scalardb-samples.git +$ cd scalardb-samples/spring-data-sample +``` + +## Step 2. Modify `build.gradle` + +To use ScalarDB Cluster, you need to modify `build.gradle`: + +```shell +$ vim build.gradle +``` + +Then, delete the existing dependency for `com.scalar-labs:scalardb-sql-direct-mode:3.12.0` from the `dependencies` section, and add the following dependency to the `dependencies` section: + +```gradle +dependencies { + ... + + implementation 'com.scalar-labs:scalardb-cluster-java-client-sdk:3.12.0' +} +``` + +## Step 3. Modify `scalardb-sql.properties` + +You need to modify `scalardb-sql.properties` to connect to ScalarDB Cluster as well. +But before doing so, you need to get the `EXTERNAL-IP` address of the service resource of Envoy (`scalardb-cluster-envoy`) as follows: + +```shell +$ kubectl get svc scalardb-cluster-envoy +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +scalardb-cluster-envoy LoadBalancer 10.105.121.51 localhost 60053:30641/TCP 16h +``` + +In this case, the `EXTERNAL-IP` address is `localhost`. + +Next, open `scalardb-sql.properties`: + +```shell +$ vim scalardb-sql.properties +``` + +Then, modify `scalardb-sql.properties` as follows: + +```properties +scalar.db.sql.connection_mode=cluster +scalar.db.sql.cluster_mode.contact_points=indirect:localhost +``` + +To connect to ScalarDB Cluster, you need to specify `cluster` for the `scalar.db.sql.connection_mode` property. +In addition, you will use the `indirect` client mode and connect to the service resource of Envoy in this tutorial. 
+For details about the client modes, see [Developer Guide for ScalarDB Cluster with the Java API](developer-guide-for-scalardb-cluster-with-java-api.md). + +## Step 4. Load a schema + +To load a schema via ScalarDB Cluster SQL, you need to use the dedicated SQL CLI for ScalarDB Cluster (SQL CLI for Cluster). +Using the SQL CLI for Cluster is basically the same as using the [ScalarDB SQL Command Line Interface](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/command-line-interface.md) except the name of the JAR file is different. +You can download the SQL CLI for Cluster from [Releases](https://github.com/scalar-labs/scalardb-cluster/releases/tag/v3.12.0). +After downloading the JAR file, you can run SQL CLI for Cluster with the following command: + +```shell +$ java -jar scalardb-cluster-sql-cli-3.12.0-all.jar --config scalardb-sql.properties --file schema.sql +``` + +## Step 5. Modify `application.properties` + +Then, you need to modify `application.properties` to connect to ScalarDB Cluster as well: + +```shell +$ vim src/main/resources/application.properties +``` + +Similar to `scalardb-sql.properties`, you need to specify `cluster` for the `spring.datasource.driver-class-name` property and use the `indirect` client mode. +To do so, modify `application.properties` as follows: + +```properties +spring.datasource.driver-class-name=com.scalar.db.sql.jdbc.SqlJdbcDriver +spring.datasource.url=jdbc:scalardb:\ +?scalar.db.sql.connection_mode=cluster\ +&scalar.db.sql.cluster_mode.contact_points=indirect:localhost\ +&scalar.db.consensus_commit.isolation_level=SERIALIZABLE\ +&scalar.db.sql.default_namespace_name=sample +``` + +## Step 6. Load the initial data + +Before running the sample application, you need to load the initial data by running the following command: + +```shell +$ ./gradlew run --args="LoadInitialData" +``` + +After the initial data has loaded, the following records should be stored in the tables: + +- For the `sample.customers` table: + +| customer_id | name | credit_limit | credit_total | +|-------------|---------------|--------------|--------------| +| 1 | Yamada Taro | 10000 | 0 | +| 2 | Yamada Hanako | 10000 | 0 | +| 3 | Suzuki Ichiro | 10000 | 0 | + +- For the `sample.items` table: + +| item_id | name | price | +|---------|--------|-------| +| 1 | Apple | 1000 | +| 2 | Orange | 2000 | +| 3 | Grape | 2500 | +| 4 | Mango | 5000 | +| 5 | Melon | 3000 | + +## Step 7. Run the sample application + +Let's start with getting information about the customer whose ID is `1`: + +```shell +$ ./gradlew run --args="GetCustomerInfo 1" +... +{"customer_id":1,"name":"Yamada Taro","credit_limit":10000,"credit_total":0} +... +``` + +Then, place an order for three apples and two oranges by using customer ID `1`. Note that the order format is `:,:,...`: + +```shell +$ ./gradlew run --args="PlaceOrder 1 1:3,2:2" +... +{"order_id":"2358ab35-5819-4f8f-acb1-12e73d97d34e","customer_id":1,"timestamp":1677478005400} +... +``` + +You can see that running this command shows the order ID. + +Let's check the details of the order by using the order ID: + +```shell +$ ./gradlew run --args="GetOrder 2358ab35-5819-4f8f-acb1-12e73d97d34e" +... +{"order_id":"2358ab35-5819-4f8f-acb1-12e73d97d34e","timestamp":1677478005400,"customer_id":1,"customer_name":"Yamada Taro","statements":[{"item_id":1,"item_name":"Apple","price":1000,"count":3,"total":3000},{"item_id":2,"item_name":"Orange","price":2000,"count":2,"total":4000}],"total":7000} +... 
+``` + +Then, let's place another order and get the order history of customer ID `1`: + +```shell +$ ./gradlew run --args="PlaceOrder 1 5:1" +... +{"order_id":"46062b16-b71b-46f9-a9ff-dc6b0991259b","customer_id":1,"timestamp":1677478201428} +... +$ ./gradlew run --args="GetOrders 1" +... +[{"order_id":"46062b16-b71b-46f9-a9ff-dc6b0991259b","timestamp":1677478201428,"customer_id":1,"customer_name":"Yamada Taro","statements":[{"item_id":5,"item_name":"Melon","price":3000,"count":1,"total":3000}],"total":3000},{"order_id":"2358ab35-5819-4f8f-acb1-12e73d97d34e","timestamp":1677478005400,"customer_id":1,"customer_name":"Yamada Taro","statements":[{"item_id":1,"item_name":"Apple","price":1000,"count":3,"total":3000},{"item_id":2,"item_name":"Orange","price":2000,"count":2,"total":4000}],"total":7000}] +... +``` + +This order history is shown in descending order by timestamp. + +The customer's current `credit_total` is `10000`. Since the customer has now reached their `credit_limit`, which was shown when retrieving their information, they cannot place anymore orders. + +```shell +$ ./gradlew run --args="GetCustomerInfo 1" +... +{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 10000} +... +$ ./gradlew run --args="PlaceOrder 1 3:1,4:1" +... +java.lang.RuntimeException: Credit limit exceeded. limit:10000, total:17500 + at sample.SampleService.placeOrder(SampleService.java:102) + at sample.SampleService$$FastClassBySpringCGLIB$$1123c447.invoke() + at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) + at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:793) + at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163) + at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:763) + at org.springframework.transaction.interceptor.TransactionInterceptor$1.proceedWithInvocation(TransactionInterceptor.java:123) + at org.springframework.transaction.interceptor.TransactionAspectSupport.invokeWithinTransaction(TransactionAspectSupport.java:388) + at org.springframework.transaction.interceptor.TransactionInterceptor.invoke(TransactionInterceptor.java:119) + at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) + at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:763) + at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:708) + at sample.SampleService$$EnhancerBySpringCGLIB$$a94e1d9.placeOrder() + at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:37) + at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:13) + at picocli.CommandLine.executeUserObject(CommandLine.java:2041) + at picocli.CommandLine.access$1500(CommandLine.java:148) + at picocli.CommandLine$RunLast.executeUserObjectOfLastSubcommandWithSameParent(CommandLine.java:2461) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2453) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2415) + at picocli.CommandLine$AbstractParseResultHandler.execute(CommandLine.java:2273) + at picocli.CommandLine$RunLast.execute(CommandLine.java:2417) + at picocli.CommandLine.execute(CommandLine.java:2170) + at sample.SampleApp.run(SampleApp.java:26) + at org.springframework.boot.SpringApplication.callRunner(SpringApplication.java:768) + at 
org.springframework.boot.SpringApplication.callRunners(SpringApplication.java:752) + at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) + at org.springframework.boot.SpringApplication.run(SpringApplication.java:1303) + at org.springframework.boot.SpringApplication.run(SpringApplication.java:1292) + at sample.SampleApp.main(SampleApp.java:35) +... +``` + +After making a payment, the customer will be able to place orders again. + +```shell +$ ./gradlew run --args="Repayment 1 8000" +... +$ ./gradlew run --args="GetCustomerInfo 1" +... +{"customer_id":1,"name":"Yamada Taro","credit_limit":10000,"credit_total":2000} +... +$ ./gradlew run --args="PlaceOrder 1 3:1,4:1" +... +{"order_id":"0350947a-9003-46f2-870e-6aa4b2df0f1f","customer_id":1,"timestamp":1677478728134} +... +``` + +## Source code of the sample application + +To learn more about Spring Data JDBC for ScalarDB, you can check the [source code of the sample application](https://github.com/scalar-labs/scalardb-samples/tree/main/spring-data-sample/src/main). + +## Next steps + +If you have not tried the other ScalarDB Cluster tutorials, we encourage you to read the following: + +* [Getting Started with ScalarDB Cluster](getting-started-with-scalardb-cluster.md) +* [Getting Started with ScalarDB Cluster GraphQL](getting-started-with-scalardb-cluster-graphql.md) +* [Getting Started with ScalarDB Cluster SQL via JDBC](getting-started-with-scalardb-cluster-sql-jdbc.md) + +For details about developing applications that use ScalarDB Cluster with the Java API, refer to the following: + +* [Developer Guide for ScalarDB Cluster with the Java API](developer-guide-for-scalardb-cluster-with-java-api.md) + +For details about the ScalarDB Cluster gRPC API, refer to the following: + +* [ScalarDB Cluster gRPC API Guide](scalardb-cluster-grpc-api-guide.md) +* [ScalarDB Cluster SQL gRPC API Guide](scalardb-cluster-sql-grpc-api-guide.md) diff --git a/docs/3.12/scalardb-cluster/getting-started-with-scalardb-cluster.md b/docs/3.12/scalardb-cluster/getting-started-with-scalardb-cluster.md new file mode 100644 index 00000000..dbee0413 --- /dev/null +++ b/docs/3.12/scalardb-cluster/getting-started-with-scalardb-cluster.md @@ -0,0 +1,395 @@ +# Getting Started with ScalarDB Cluster + +This tutorial describes how to create a sample application that uses [ScalarDB Cluster](index.md) through the Java API. + +## Overview + +The sample e-commerce application shows how users can order and pay for items by using a line of credit. The use case described in this tutorial is the same as the basic [ScalarDB sample](https://github.com/scalar-labs/scalardb-samples/tree/main/scalardb-sample/README.md) but takes advantage of ScalarDB Cluster. + +The following diagram shows the system architecture of the sample application: + +```mermaid +stateDiagram-v2 + state "Schema Loader
(indirect client mode)" as SL + state "Sample application using the Java API
(indirect client mode)" as SA + state "Kubernetes Cluster" as KC + state "Service (Envoy)" as SE + state "Pod" as P1 + state "Pod" as P2 + state "Pod" as P3 + state "Envoy" as E1 + state "Envoy" as E2 + state "Envoy" as E3 + state "Service (ScalarDB Cluster)" as SSC + state "ScalarDB Cluster" as SC1 + state "ScalarDB Cluster" as SC2 + state "ScalarDB Cluster" as SC3 + state "PostgreSQL" as PSQL + SL --> SE + SA --> SE + state KC { + SE --> E1 + SE --> E2 + SE --> E3 + state P1 { + E1 --> SSC + E2 --> SSC + E3 --> SSC + } + SSC --> SC1 + SSC --> SC2 + SSC --> SC3 + state P2 { + SC1 --> PSQL + SC1 --> SC2 + SC1 --> SC3 + SC2 --> PSQL + SC2 --> SC1 + SC2 --> SC3 + SC3 --> PSQL + SC3 --> SC1 + SC3 --> SC2 + } + state P3 { + PSQL + } + } +``` + +### What you can do in this sample application + +The sample application supports the following types of transactions: + +- Get customer information. +- Place an order by using a line of credit. + - Checks if the cost of the order is below the customer's credit limit. + - If the check passes, records the order history and updates the amount the customer has spent. +- Get order information by order ID. +- Get order information by customer ID. +- Make a payment. + - Reduces the amount the customer has spent. + +## Prerequisites + +- One of the following Java Development Kits (JDKs): + - [Oracle JDK](https://www.oracle.com/java/technologies/downloads/) LTS version (8, 11, or 17) + - [OpenJDK](https://openjdk.org/install/) LTS version (8, 11, or 17) +- ScalarDB Cluster running on a Kubernetes cluster + - We assume that you have a ScalarDB Cluster running on a Kubernetes cluster that you deployed by following the instructions in [Set Up ScalarDB Cluster on Kubernetes by Using a Helm Chart](setup-scalardb-cluster-on-kubernetes-by-using-helm-chart.md). + +{% capture notice--info %} +**Note** + +We recommend using the LTS versions mentioned above, but other non-LTS versions may work. + +In addition, other JDKs should work with ScalarDB, but we haven't tested them. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +## Set up ScalarDB Cluster + +The following sections describe how to set up the sample e-commerce application. + +### Clone the ScalarDB samples repository + +Open **Terminal**, then clone the ScalarDB samples repository by running the following command: + +```console +$ git clone https://github.com/scalar-labs/scalardb-samples +``` + +Then, go to the directory that contains the sample application by running the following command: + +```console +$ cd scalardb-samples/scalardb-sample +``` + +### Modify `build.gradle` + +To use ScalarDB Cluster, open `build.gradle` in your preferred text editor. Then, delete the existing dependency for `com.scalar-labs:scalardb:3.12.0` from the `dependencies` section, and add the following dependency to the `dependencies` section: + +```gradle +dependencies { + ... + + implementation 'com.scalar-labs:scalardb-cluster-java-client-sdk:3.12.0' +} +``` + +### Modify `database.properties` + +You need to modify `database.properties` to connect to ScalarDB Cluster as well. But before doing so, you need to get the `EXTERNAL-IP` address of the Envoy service resource (`scalardb-cluster-envoy`). To get the service resource, run the following command: + +```console +$ kubectl get svc scalardb-cluster-envoy +``` + +You should see a similar output as below, with different values for `CLUSTER-IP`, `PORT(S)`, and `AGE`: + +```console +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +scalardb-cluster-envoy LoadBalancer 10.105.121.51 localhost 60053:30641/TCP 16h +``` + +In this case, the `EXTERNAL-IP` address is `localhost`. + +In `database.properties`, you need to specify `cluster` for the `scalar.db.transaction_manager` property and use `indirect` as the client mode for `scalar.db.contact_points` to connect to the Envoy service resource. + +Open `database.properties` by running the following command: + +```console +$ vim database.properties +``` + +Then, modify `database.properties` as follows: + +```properties +scalar.db.transaction_manager=cluster +scalar.db.contact_points=indirect:localhost +``` + +{% capture notice--info %} +**Note** + +For details about the client modes, see [Developer Guide for ScalarDB Cluster with the Java API](developer-guide-for-scalardb-cluster-with-java-api.md). +{% endcapture %} + +
{{ notice--info | markdownify }}
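+
+The sample application already contains code along these lines, but as a rough reference, the following is a minimal sketch (an illustrative assumption, not the sample application's actual code; the class name is hypothetical) of how a client that uses the ScalarDB Java API creates a transaction manager from `database.properties` and reads the record for customer ID `1`, assuming the schema and initial data from the following steps have been loaded:
+
+```java
+import com.scalar.db.api.DistributedTransaction;
+import com.scalar.db.api.DistributedTransactionManager;
+import com.scalar.db.api.Get;
+import com.scalar.db.api.Result;
+import com.scalar.db.exception.transaction.TransactionException;
+import com.scalar.db.io.Key;
+import com.scalar.db.service.TransactionFactory;
+import java.util.Optional;
+
+public class ClusterJavaApiSketch {
+  public static void main(String[] args) throws Exception {
+    // The factory picks up scalar.db.transaction_manager=cluster and the indirect contact point.
+    TransactionFactory factory = TransactionFactory.create("database.properties");
+    DistributedTransactionManager manager = factory.getTransactionManager();
+
+    DistributedTransaction transaction = manager.start();
+    try {
+      // Read the customer record by its partition key.
+      Optional<Result> customer =
+          transaction.get(
+              Get.newBuilder()
+                  .namespace("sample")
+                  .table("customers")
+                  .partitionKey(Key.ofInt("customer_id", 1))
+                  .build());
+      customer.ifPresent(result -> System.out.println(result.getText("name")));
+      transaction.commit();
+    } catch (TransactionException e) {
+      transaction.abort();
+      throw e;
+    } finally {
+      manager.close();
+    }
+  }
+}
+```
+
+Because `scalar.db.transaction_manager` is set to `cluster`, the same Java API calls are routed through the Envoy service to the ScalarDB Cluster nodes instead of accessing the underlying database directly.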
+ +### Load the schema + +The database schema (the method in which the data will be organized) for the sample application has already been defined in [`schema.json`](https://github.com/scalar-labs/scalardb-samples/tree/main/scalardb-sample/schema.json). + +To apply the schema, go to the [ScalarDB Cluster Releases](https://github.com/scalar-labs/scalardb-cluster/releases) page and download the ScalarDB Cluster Schema Loader that matches the version of ScalarDB Cluster that you want to use to the `scalardb-samples/scalardb-sample` folder. + +Then, run the following command, replacing `` with the version of the ScalarDB Cluster Schema Loader that you downloaded: + +```console +$ java -jar scalardb-cluster-schema-loader--all.jar --config database.properties -f schema.json --coordinator +``` + +#### Schema details + +As shown in [`schema.json`](https://github.com/scalar-labs/scalardb-samples/tree/main/scalardb-sample/schema.json) for the sample application, all the tables are created in the `sample` namespace. + +- `sample.customers`: a table that manages customer information + - `credit_limit`: the maximum amount of money that the lender will allow the customer to spend from their line of credit + - `credit_total`: the amount of money that the customer has spent from their line of credit +- `sample.orders`: a table that manages order information +- `sample.statements`: a table that manages order statement information +- `sample.items`: a table that manages information for items to be ordered + +The Entity Relationship Diagram for the schema is as follows: + +![ERD](https://scalardb.scalar-labs.com/docs/latest/scalardb-samples/scalardb-sample/images/ERD.png) + +### Load the initial data + +Before running the sample application, you need to load the initial data by running the following command: + +```console +$ ./gradlew run --args="LoadInitialData" +``` + +After the initial data has loaded, the following records should be stored in the tables. + +**`sample.customers` table** + +| customer_id | name | credit_limit | credit_total | +|-------------|---------------|--------------|--------------| +| 1 | Yamada Taro | 10000 | 0 | +| 2 | Yamada Hanako | 10000 | 0 | +| 3 | Suzuki Ichiro | 10000 | 0 | + +**`sample.items` table** + +| item_id | name | price | +|---------|--------|-------| +| 1 | Apple | 1000 | +| 2 | Orange | 2000 | +| 3 | Grape | 2500 | +| 4 | Mango | 5000 | +| 5 | Melon | 3000 | + +## Execute transactions and retrieve data in the sample application + +The following sections describe how to execute transactions and retrieve data in the sample e-commerce application. + +### Get customer information + +Start with getting information about the customer whose ID is `1` by running the following command: + +```console +$ ./gradlew run --args="GetCustomerInfo 1" +``` + +You should see the following output: + +```console +... +{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 0} +... +``` + +### Place an order + +Then, have customer ID `1` place an order for three apples and two oranges by running the following command: + +{% capture notice--info %} +**Note** + +The order format in this command is `./gradlew run --args="PlaceOrder :,:,..."`. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +```console +$ ./gradlew run --args="PlaceOrder 1 1:3,2:2" +``` + +You should see a similar output as below, with a different UUID for `order_id`, which confirms that the order was successful: + +```console +... +{"order_id": "dea4964a-ff50-4ecf-9201-027981a1566e"} +... +``` + +### Check order details + +Check details about the order by running the following command, replacing `` with the UUID for the `order_id` that was shown after running the previous command: + +```console +$ ./gradlew run --args="GetOrder " +``` + +You should see a similar output as below, with different UUIDs for `order_id` and `timestamp`: + +```console +... +{"order": {"order_id": "dea4964a-ff50-4ecf-9201-027981a1566e","timestamp": 1650948340914,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 1,"item_name": "Apple","price": 1000,"count": 3,"total": 3000},{"item_id": 2,"item_name": "Orange","price": 2000,"count": 2,"total": 4000}],"total": 7000}} +... +``` + +### Place another order + +Place an order for one melon that uses the remaining amount in `credit_total` for customer ID `1` by running the following command: + +```console +$ ./gradlew run --args="PlaceOrder 1 5:1" +``` + +You should see a similar output as below, with a different UUID for `order_id`, which confirms that the order was successful: + +```console +... +{"order_id": "bcc34150-91fa-4bea-83db-d2dbe6f0f30d"} +... +``` + +### Check order history + +Get the history of all orders for customer ID `1` by running the following command: + +```console +$ ./gradlew run --args="GetOrders 1" +``` + +You should see a similar output as below, with different UUIDs for `order_id` and `timestamp`, which shows the history of all orders for customer ID `1` in descending order by timestamp: + +```console +... +{"order": [{"order_id": "dea4964a-ff50-4ecf-9201-027981a1566e","timestamp": 1650948340914,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 1,"item_name": "Apple","price": 1000,"count": 3,"total": 3000},{"item_id": 2,"item_name": "Orange","price": 2000,"count": 2,"total": 4000}],"total": 7000},{"order_id": "bcc34150-91fa-4bea-83db-d2dbe6f0f30d","timestamp": 1650948412766,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 5,"item_name": "Melon","price": 3000,"count": 1,"total": 3000}],"total": 3000}]} +... +``` + +### Check credit total + +Get the credit total for customer ID `1` by running the following command: + +```console +$ ./gradlew run --args="GetCustomerInfo 1" +``` + +You should see the following output, which shows that customer ID `1` has reached their `credit_limit` in `credit_total` and cannot place anymore orders: + +```console +... +{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 10000} +... +``` + +Try to place an order for one grape and one mango by running the following command: + +```console +$ ./gradlew run --args="PlaceOrder 1 3:1,4:1" +``` + +You should see the following output, which shows that the order failed because the `credit_total` amount would exceed the `credit_limit` amount. + +```console +... 
+java.lang.RuntimeException: Credit limit exceeded + at sample.Sample.placeOrder(Sample.java:205) + at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:33) + at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:8) + at picocli.CommandLine.executeUserObject(CommandLine.java:1783) + at picocli.CommandLine.access$900(CommandLine.java:145) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2141) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2108) + at picocli.CommandLine$AbstractParseResultHandler.execute(CommandLine.java:1975) + at picocli.CommandLine.execute(CommandLine.java:1904) + at sample.command.SampleCommand.main(SampleCommand.java:35) +... +``` + +### Make a payment + +To continue making orders, customer ID `1` must make a payment to reduce the `credit_total` amount. + +Make a payment by running the following command: + +```console +$ ./gradlew run --args="Repayment 1 8000" +``` + +Then, check the `credit_total` amount for customer ID `1` by running the following command: + +```console +$ ./gradlew run --args="GetCustomerInfo 1" +``` + +You should see the following output, which shows that a payment was applied to customer ID `1`, reducing the `credit_total` amount: + +```console +... +{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 2000} +... +``` + +Now that customer ID `1` has made a payment, place an order for one grape and one melon by running the following command: + +```console +$ ./gradlew run --args="PlaceOrder 1 3:1,4:1" +``` + +You should see a similar output as below, with a different UUID for `order_id`, which confirms that the order was successful: + +``` +... +{"order_id": "8911cab3-1c2b-4322-9386-adb1c024e078"} +... +``` + +## Reference + +For details about developing applications that use ScalarDB Cluster with the Java API, refer to [Developer Guide for ScalarDB Cluster with the Java API](developer-guide-for-scalardb-cluster-with-java-api.md). + +## Next steps + +For other ScalarDB Cluster tutorials, see the following: + +* [Getting Started with ScalarDB Cluster GraphQL](getting-started-with-scalardb-cluster-graphql.md) +* [Getting Started with ScalarDB Cluster SQL via JDBC](getting-started-with-scalardb-cluster-sql-jdbc.md) +* [Getting Started with ScalarDB Cluster SQL via Spring Data JDBC for ScalarDB](getting-started-with-scalardb-cluster-sql-spring-data-jdbc.md) diff --git a/docs/3.12/scalardb-cluster/getting-started-with-using-go-for-scalardb-cluster.md b/docs/3.12/scalardb-cluster/getting-started-with-using-go-for-scalardb-cluster.md new file mode 100644 index 00000000..69a2ae8b --- /dev/null +++ b/docs/3.12/scalardb-cluster/getting-started-with-using-go-for-scalardb-cluster.md @@ -0,0 +1,420 @@ +# Getting Started with Using Go for ScalarDB Cluster + +This document explains how to write gRPC client code for ScalarDB Cluster by using Go. + +## Prerequisites + +- [Go](https://go.dev/dl/) (any one of the three latest major releases) +- ScalarDB Cluster running on a Kubernetes cluster + - We assume that you have a ScalarDB Cluster running on a Kubernetes cluster that you deployed by following the instructions in [Set Up ScalarDB Cluster on Kubernetes by Using a Helm Chart](setup-scalardb-cluster-on-kubernetes-by-using-helm-chart.md). + +## Sample application + +This tutorial illustrates the process of creating an electronic money application, where money can be transferred between accounts. + +## Step 1. Create `schema.json` + +The following is a simple example schema. 
+ +Create `schema.json`, and add the following to the file: + +```json +{ + "emoney.account": { + "transaction": true, + "partition-key": [ + "id" + ], + "clustering-key": [], + "columns": { + "id": "TEXT", + "balance": "INT" + } + } +} +``` + +## Step 2. Create `database.properties` + +You need to create `database.properties` for the Schema Loader for ScalarDB Cluster. +But first, you need to get the `EXTERNAL-IP` address of the service resource of the `LoadBalancer` service (`scalardb-cluster-envoy`). + +To see the `EXTERNAL-IP` address, run the following command: + +```console +$ kubectl get svc scalardb-cluster-envoy +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +scalardb-cluster-envoy LoadBalancer 10.105.121.51 localhost 60053:30641/TCP 16h +``` + +In this case, the `EXTERNAL-IP` address is `localhost`. + +Then, create `database.properties`, and add the following to the file: + +```properties +scalar.db.transaction_manager=cluster +scalar.db.contact_points=indirect:localhost +``` + +To connect to ScalarDB Cluster, you need to specify `cluster` for the `scalar.db.transaction_manager` property. +In addition, you will use the `indirect` client mode and connect to the service resource of Envoy in this tutorial. +For details about the client modes, see [Developer Guide for ScalarDB Cluster with the Java API](developer-guide-for-scalardb-cluster-with-java-api.md). + +## Step 3. Load a schema + +To load a schema via ScalarDB Cluster, you need to use the dedicated Schema Loader for ScalarDB Cluster (Schema Loader for Cluster). Using the Schema Loader for Cluster is basically the same as using the [Schema Loader for ScalarDB](https://github.com/scalar-labs/scalardb/blob/master/docs/schema-loader.md) except the name of the JAR file is different. You can download the Schema Loader for Cluster at [Releases](https://github.com/scalar-labs/scalardb-cluster/releases). After downloading the JAR file, you can run the Schema Loader for Cluster with the following command: + +```shell +$ java -jar scalardb-cluster-schema-loader-3.12.0-all.jar --config database.properties -f schema.json --coordinator +``` + +## Step 4. Set up a Go environment + +Follow the [Prerequisites](https://grpc.io/docs/languages/go/quickstart/#prerequisites) section in the gRPC quick-start document to install the following components: + +- Go +- Protocol buffer compiler, `protoc`, version 3.15 or later +- Go plugins for the protocol compiler + +## Step 5. Generate the stub code for ScalarDB Cluster gRPC + +To communicate with the gRPC server for ScalarDB Cluster, you will need to generate the stub code from the proto file. + +First, in a new working directory, create a directory named `scalardb-cluster`, which you will use to generate the gRPC code from, by running the following command: + +```console +$ mkdir scalardb-cluster +``` + +Then, download the [`scalardb-cluster.proto`](https://github.com/scalar-labs/scalardb-cluster/blob/v3.12.0/rpc/src/main/proto/scalardb-cluster.proto) file and save it in the directory that you created. + +Generate the gRPC code by running the following command: + +```console +$ protoc --go_out=. --go_opt=paths=source_relative \ + --go_opt=Mscalardb-cluster/scalardb-cluster.proto=example.com/scalardb-cluster \ + --go-grpc_out=. 
--go-grpc_opt=paths=source_relative \ + --go-grpc_opt=Mscalardb-cluster/scalardb-cluster.proto=example.com/scalardb-cluster \ + scalardb-cluster/scalardb-cluster.proto +``` + +After running the command, you should see two files in the `scalardb-cluster` subdirectory: `scalardb-cluster.pb.go` and `scalardb-cluster_grpc.pb.go`. + +## Step 6. Write a sample application + +The following is the program that uses the gRPC code. Save it as `main.go` in the working directory. This program does the same thing as the `ElectronicMoney.java` program in [Getting Started with ScalarDB](https://scalardb.scalar-labs.com/docs/latest/getting-started-with-scalardb/). Note that you have to update the value of `SERVER_ADDRESS` based on the `EXTERNAL-IP` value of the ScalarDB Cluster `LoadBalancer` service in your environment. + +```go +package main + +import ( + "context" + "errors" + "flag" + "fmt" + "log" + "os" + "time" + + pb "emoney/scalardb-cluster" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +const ( + SERVER_ADDRESS = "localhost:60053" + NAMESPACE = "emoney" + TABLENAME = "account" + ID = "id" + BALANCE = "balance" +) + +var requestHeader = pb.RequestHeader{HopLimit: 10} + +type TxFn func(ctx context.Context, client pb.DistributedTransactionClient, transactionId string) error + +func withTransaction(fn TxFn) error { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Set up a connection to the server. + conn, err := grpc.Dial(SERVER_ADDRESS, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return err + } + defer conn.Close() + + client := pb.NewDistributedTransactionClient(conn) + + // Begin a transaction + beginResponse, err := client.Begin(ctx, &pb.BeginRequest{RequestHeader: &requestHeader}) + if err != nil { + return err + } + transactionId := beginResponse.TransactionId + + // Execute the function + err = fn(ctx, client, transactionId) + if err != nil { + // Rollback the transaction if there is an error + client.Rollback(ctx, &pb.RollbackRequest{TransactionId: transactionId}) + return err + } + + // Commit the transaction + _, err = client.Commit(ctx, &pb.CommitRequest{RequestHeader: &requestHeader, TransactionId: transactionId}) + return err +} + +func charge(ctx context.Context, client pb.DistributedTransactionClient, transactionId string, id string, amount int) error { + partitionKey := pb.Key{Columns: []*pb.Column{{Name: ID, Value: &pb.Column_TextValue_{TextValue: &pb.Column_TextValue{Value: &id}}}}} + + // Retrieve the current balance for id + get := pb.Get{ + NamespaceName: NAMESPACE, TableName: TABLENAME, + PartitionKey: &partitionKey, ClusteringKey: nil, + GetType: pb.Get_GET_TYPE_GET, + } + getResponse, err := client.Get(ctx, &pb.GetRequest{RequestHeader: &requestHeader, TransactionId: transactionId, Get: &get}) + if err != nil { + return err + } + + // Calculate the balance + balance := int32(amount) + if result := getResponse.GetResult(); result != nil { + for _, column := range result.GetColumns() { + if column.Name == BALANCE { + balance += column.GetIntValue().GetValue() + break + } + } + } + + // Update the balance + put := pb.Put{ + NamespaceName: NAMESPACE, TableName: TABLENAME, + PartitionKey: &partitionKey, ClusteringKey: nil, + Columns: []*pb.Column{ + {Name: BALANCE, Value: &pb.Column_IntValue_{IntValue: &pb.Column_IntValue{Value: &balance}}}, + }, + } + _, err = client.Put(ctx, &pb.PutRequest{RequestHeader: &requestHeader, TransactionId: transactionId, Puts: 
[]*pb.Put{&put}}) + return err +} + +func pay(ctx context.Context, client pb.DistributedTransactionClient, transactionId string, fromId string, toId string, amount int) error { + fromPartitionKey := pb.Key{Columns: []*pb.Column{{Name: ID, Value: &pb.Column_TextValue_{TextValue: &pb.Column_TextValue{Value: &fromId}}}}} + toPartitionKey := pb.Key{Columns: []*pb.Column{{Name: ID, Value: &pb.Column_TextValue_{TextValue: &pb.Column_TextValue{Value: &toId}}}}} + + // Retrieve the current balances for ids + fromGet := pb.Get{ + NamespaceName: NAMESPACE, TableName: TABLENAME, + PartitionKey: &fromPartitionKey, ClusteringKey: nil, + GetType: pb.Get_GET_TYPE_GET, + } + fromGetResponse, err := client.Get(ctx, &pb.GetRequest{RequestHeader: &requestHeader, TransactionId: transactionId, Get: &fromGet}) + if err != nil { + return err + } + toGet := pb.Get{ + NamespaceName: NAMESPACE, TableName: TABLENAME, + PartitionKey: &toPartitionKey, ClusteringKey: nil, + GetType: pb.Get_GET_TYPE_GET, + } + toGetResponse, err := client.Get(ctx, &pb.GetRequest{RequestHeader: &requestHeader, TransactionId: transactionId, Get: &toGet}) + if err != nil { + return err + } + + // Calculate the balances (it assumes that both accounts exist) + var ( + fromBalance int32 + toBalance int32 + ) + for _, column := range fromGetResponse.GetResult().GetColumns() { + if column.Name == BALANCE { + fromBalance = column.GetIntValue().GetValue() + break + } + } + for _, column := range toGetResponse.GetResult().GetColumns() { + if column.Name == BALANCE { + toBalance = column.GetIntValue().GetValue() + break + } + } + newFromBalance := fromBalance - int32(amount) + newToBalance := toBalance + int32(amount) + + if newFromBalance < 0 { + return errors.New(fromId + " doesn't have enough balance.") + } + + // Update the balances + fromPut := pb.Put{ + NamespaceName: NAMESPACE, TableName: TABLENAME, + PartitionKey: &fromPartitionKey, ClusteringKey: nil, + Columns: []*pb.Column{ + {Name: BALANCE, Value: &pb.Column_IntValue_{IntValue: &pb.Column_IntValue{Value: &newFromBalance}}}, + }, + } + toPut := pb.Put{ + NamespaceName: NAMESPACE, TableName: TABLENAME, + PartitionKey: &toPartitionKey, ClusteringKey: nil, + Columns: []*pb.Column{ + {Name: BALANCE, Value: &pb.Column_IntValue_{IntValue: &pb.Column_IntValue{Value: &newToBalance}}}, + }, + } + _, err = client.Put(ctx, &pb.PutRequest{RequestHeader: &requestHeader, TransactionId: transactionId, Puts: []*pb.Put{&fromPut, &toPut}}) + return err +} + +func getBalance(ctx context.Context, client pb.DistributedTransactionClient, transactionId string, id string) (int, error) { + // Retrieve the current balance for id + get := pb.Get{ + NamespaceName: NAMESPACE, TableName: TABLENAME, + PartitionKey: &pb.Key{Columns: []*pb.Column{{Name: ID, Value: &pb.Column_TextValue_{TextValue: &pb.Column_TextValue{Value: &id}}}}}, + ClusteringKey: nil, + GetType: pb.Get_GET_TYPE_GET, + } + getResponse, err := client.Get(ctx, &pb.GetRequest{RequestHeader: &requestHeader, TransactionId: transactionId, Get: &get}) + if err != nil { + return 0, err + } + if getResponse.GetResult() == nil || len(getResponse.GetResult().GetColumns()) == 0 { + return 0, errors.New("Account " + id + " doesn't exist.") + } + + var balance int + for _, column := range getResponse.GetResult().GetColumns() { + if column.Name == BALANCE { + balance = int(column.GetIntValue().GetValue()) + break + } + } + return balance, nil +} + +func main() { + var ( + action = flag.String("action", "", "Action to perform: charge / pay / getBalance") + fromId = 
flag.String("from", "", "From account (needed for pay)") + toId = flag.String("to", "", "To account (needed for charge and pay)") + id = flag.String("id", "", "Account id (needed for getBalance)") + ) + var amount int + flag.IntVar(&amount, "amount", 0, "Amount to transfer (needed for charge and pay)") + flag.Parse() + + if *action == "charge" { + if *toId == "" || amount < 0 { + printUsageAndExit() + } + err := withTransaction(func(ctx context.Context, client pb.DistributedTransactionClient, txId string) error { + return charge(ctx, client, txId, *toId, amount) + }) + if err != nil { + log.Fatalf("error: %v", err) + } + } else if *action == "pay" { + if *toId == "" || *fromId == "" || amount < 0 { + printUsageAndExit() + } + err := withTransaction(func(ctx context.Context, client pb.DistributedTransactionClient, txId string) error { + return pay(ctx, client, txId, *fromId, *toId, amount) + }) + if err != nil { + log.Fatalf("error: %v", err) + } + } else if *action == "getBalance" { + if *id == "" { + printUsageAndExit() + } + var balance int + err := withTransaction(func(ctx context.Context, client pb.DistributedTransactionClient, txId string) error { + var err error + balance, err = getBalance(ctx, client, txId, *id) + return err + }) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Println(balance) + } else { + fmt.Fprintln(os.Stderr, "Unknown action "+*action) + printUsageAndExit() + } +} + +func printUsageAndExit() { + flag.Usage() + os.Exit(1) +} +``` + +After creating the `main.go` file, you need to create the `go.mod` file by running the following commands: + +```console +go mod init emoney +go mod tidy +``` + +Now, the directory structure should be as follows: + +```text +. +├── go.mod +├── go.sum +├── main.go +└── scalardb-cluster + ├── scalardb-cluster.pb.go + ├── scalardb-cluster.proto + └── scalardb-cluster_grpc.pb.go +``` + +You can then run the program as follows: + +- Charge `1000` to `user1`: + + ```console + $ go run main.go -action charge -amount 1000 -to user1 + ``` + +- Charge `0` to `merchant1` (Just create an account for `merchant1`): + + ```console + $ go run main.go -action charge -amount 0 -to merchant1 + ``` + +- Pay `100` from `user1` to `merchant1`: + + ```console + $ go run main.go -action pay -amount 100 -from user1 -to merchant1 + ``` + +- Get the balance of `user1`: + + ```console + $ go run main.go -action getBalance -id user1 + ``` + +- Get the balance of `merchant1`: + + ```console + $ go run main.go -action getBalance -id merchant1 + ``` + +Note that you can also use `go build` to get the binary and then run it: + +```console +$ go build +$ ./emoney -action getBalance -id user1 +``` + +## References + +- [Getting Started with ScalarDB Cluster](getting-started-with-scalardb-cluster.md) +- [Set Up ScalarDB Cluster on Kubernetes by Using a Helm Chart](setup-scalardb-cluster-on-kubernetes-by-using-helm-chart.md) +- [ScalarDB Cluster gRPC API Guide](scalardb-cluster-grpc-api-guide.md) diff --git a/docs/3.12/scalardb-cluster/getting-started-with-using-python-for-scalardb-cluster.md b/docs/3.12/scalardb-cluster/getting-started-with-using-python-for-scalardb-cluster.md new file mode 100644 index 00000000..625b151a --- /dev/null +++ b/docs/3.12/scalardb-cluster/getting-started-with-using-python-for-scalardb-cluster.md @@ -0,0 +1,463 @@ +# Getting Started with Using Python for ScalarDB Cluster + +This document explains how to write gRPC client code for ScalarDB Cluster by using Python. 
+ +## Prerequisites + +- [Python](https://www.python.org/downloads) 3.7 or later +- ScalarDB Cluster running on a Kubernetes cluster + - We assume that you have a ScalarDB Cluster running on a Kubernetes cluster that you deployed by following the instructions in [Set Up ScalarDB Cluster on Kubernetes by Using a Helm Chart](setup-scalardb-cluster-on-kubernetes-by-using-helm-chart.md). + +## Sample application + +This tutorial illustrates the process of creating an electronic money application, where money can be transferred between accounts. + +## Step 1. Create `schema.json` + +The following is a simple example schema. + +Create `schema.json`, and add the following to the file: + +```json +{ + "emoney.account": { + "transaction": true, + "partition-key": [ + "id" + ], + "clustering-key": [], + "columns": { + "id": "TEXT", + "balance": "INT" + } + } +} +``` + +## Step 2. Create `database.properties` + +You need to create `database.properties` for the Schema Loader for ScalarDB Cluster. +But first, you need to get the `EXTERNAL-IP` address of the service resource of the `LoadBalancer` service (`scalardb-cluster-envoy`). + +To see the `EXTERNAL-IP` address, run the following command: + +```console +$ kubectl get svc scalardb-cluster-envoy +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +scalardb-cluster-envoy LoadBalancer 10.105.121.51 localhost 60053:30641/TCP 16h +``` + +In this case, the `EXTERNAL-IP` address is `localhost`. + +Then, create `database.properties`, and add the following to the file: + +```properties +scalar.db.transaction_manager=cluster +scalar.db.contact_points=indirect:localhost +``` + +To connect to ScalarDB Cluster, you need to specify `cluster` for the `scalar.db.transaction_manager` property. +In addition, you will use the `indirect` client mode and connect to the service resource of Envoy in this tutorial. +For details about the client modes, see [Developer Guide for ScalarDB Cluster with the Java API](developer-guide-for-scalardb-cluster-with-java-api.md). + +## Step 3. Load a schema + +To load a schema via ScalarDB Cluster, you need to use the dedicated Schema Loader for ScalarDB Cluster (Schema Loader for Cluster). Using the Schema Loader for Cluster is basically the same as using the [Schema Loader for ScalarDB](https://github.com/scalar-labs/scalardb/blob/master/docs/schema-loader.md) except the name of the JAR file is different. You can download the Schema Loader for Cluster at [Releases](https://github.com/scalar-labs/scalardb-cluster/releases). After downloading the JAR file, you can run the Schema Loader for Cluster with the following command: + +```shell +$ java -jar scalardb-cluster-schema-loader-3.12.0-all.jar --config database.properties -f schema.json --coordinator +``` + +## Step 4. Set up a Python environment + +You can choose any way you like to manage your Python environment. For the purpose of this guide, we assume that your Python application is running in an environment by using `venv`. + +Create a working directory anywhere, and go there. Then, run the following command to activate `venv` by running the following command: + +```console +$ python3 -m venv venv +$ source venv/bin/activate +``` + +Let's install the gRPC packages with the `pip` command: + +```console +$ pip install grpcio grpcio-tools +``` + +## Step 5. Generate the stub code for ScalarDB Cluster gRPC + +To communicate with the gRPC server for ScalarDB Cluster, you will need to generate the stub code from the proto file. 
+ +First, download the [`scalardb-cluster.proto`](https://github.com/scalar-labs/scalardb-cluster/blob/v3.12.0/rpc/src/main/proto/scalardb-cluster.proto) file, then save it in the working directory. + +You can generate the stub code by running the following command: + +```console +$ python -m grpc_tools.protoc -I . --python_out=. --pyi_out=. --grpc_python_out=. scalardb-cluster.proto +``` + +The following files will be generated: + +- `scalardb_cluster_pb2.py` +- `scalardb_cluster_pb2.pyi` +- `scalardb_cluster_pb2_grpc.py` + +## Step 6. Write a sample application + +The following is the sample Python application (`electronic_money.py`) that uses the stub code. This program does the same thing as the `ElectronicMoney.java` program in [Getting Started with ScalarDB](https://scalardb.scalar-labs.com/docs/latest/getting-started-with-scalardb/). Note that you have to update the value of `SERVER_ADDRESS` based on the `EXTERNAL-IP` value of the ScalarDB Cluster `LoadBalancer` service in your environment. + +```python +import argparse +from typing import Optional + +import grpc + +import scalardb_cluster_pb2_grpc +from scalardb_cluster_pb2 import ( + BeginRequest, + BeginResponse, + Column, + CommitRequest, + Get, + GetRequest, + GetResponse, + Key, + Put, + PutRequest, + RequestHeader, + RollbackRequest, +) + +SERVER_ADDRESS = "localhost:60053" +NAMESPACE = "emoney" +TABLENAME = "account" +ID = "id" +BALANCE = "balance" + +request_header = RequestHeader(hop_limit=10) + + +def charge(id: str, amount: int) -> None: + with grpc.insecure_channel(SERVER_ADDRESS) as channel: + stub = scalardb_cluster_pb2_grpc.DistributedTransactionStub(channel) + + begin_response: BeginResponse = stub.Begin( + BeginRequest(request_header=request_header) + ) + + transaction_id = begin_response.transaction_id + + try: + pkey = Key( + columns=[ + Column( + name=ID, + text_value=Column.TextValue(value=id), + ) + ] + ) + + # Retrieve the current balance for id + get = Get( + namespace_name=NAMESPACE, + table_name=TABLENAME, + get_type=Get.GetType.GET_TYPE_GET, + partition_key=pkey, + ) + get_response: GetResponse = stub.Get( + GetRequest( + request_header=request_header, + transaction_id=transaction_id, + get=get, + ) + ) + + # Calculate the balance + balance = amount + if get_response.result.columns: + balance_column = next( + c for c in get_response.result.columns if c.name == BALANCE + ) + current = balance_column.int_value.value + balance += current + + # Update the balance + put = Put( + namespace_name=NAMESPACE, + table_name=TABLENAME, + partition_key=pkey, + columns=[ + Column(name=BALANCE, int_value=Column.IntValue(value=balance)) + ], + ) + stub.Put( + PutRequest( + request_header=request_header, + transaction_id=transaction_id, + puts=[put], + ) + ) + + # Commit the transaction + stub.Commit( + CommitRequest( + request_header=request_header, + transaction_id=transaction_id, + ) + ) + except Exception as e: + # Rollback the transaction + stub.Rollback( + RollbackRequest( + request_header=request_header, + transaction_id=transaction_id, + ) + ) + raise e + + +def pay(from_id: str, to_id: str, amount: int) -> None: + with grpc.insecure_channel(SERVER_ADDRESS) as channel: + stub = scalardb_cluster_pb2_grpc.DistributedTransactionStub(channel) + + begin_response: BeginResponse = stub.Begin( + BeginRequest(request_header=request_header) + ) + + transaction_id = begin_response.transaction_id + + try: + from_pkey = Key( + columns=[ + Column( + name=ID, + text_value=Column.TextValue(value=from_id), + ) + ] + ) + to_pkey = 
Key( + columns=[ + Column( + name=ID, + text_value=Column.TextValue(value=to_id), + ) + ] + ) + + # Retrieve the current balances for ids + from_get = Get( + namespace_name=NAMESPACE, + table_name=TABLENAME, + get_type=Get.GetType.GET_TYPE_GET, + partition_key=from_pkey, + ) + from_get_response: GetResponse = stub.Get( + GetRequest( + request_header=request_header, + transaction_id=transaction_id, + get=from_get, + ) + ) + to_get = Get( + namespace_name=NAMESPACE, + table_name=TABLENAME, + get_type=Get.GetType.GET_TYPE_GET, + partition_key=to_pkey, + ) + to_get_response: GetResponse = stub.Get( + GetRequest( + request_header=request_header, + transaction_id=transaction_id, + get=to_get, + ) + ) + + # Calculate the balances (it assumes that both accounts exist) + new_from_balance = ( + next( + c for c in from_get_response.result.columns if c.name == BALANCE + ).int_value.value + - amount + ) + new_to_balance = ( + next( + c for c in to_get_response.result.columns if c.name == BALANCE + ).int_value.value + + amount + ) + + if new_from_balance < 0: + raise RuntimeError(from_id + " doesn't have enough balance.") + + # Update the balances + from_put = Put( + namespace_name=NAMESPACE, + table_name=TABLENAME, + partition_key=from_pkey, + columns=[ + Column( + name=BALANCE, int_value=Column.IntValue(value=new_from_balance) + ) + ], + ) + to_put = Put( + namespace_name=NAMESPACE, + table_name=TABLENAME, + partition_key=to_pkey, + columns=[ + Column( + name=BALANCE, int_value=Column.IntValue(value=new_to_balance) + ) + ], + ) + stub.Put( + PutRequest( + request_header=request_header, + transaction_id=transaction_id, + puts=[from_put, to_put], + ) + ) + + # Commit the transaction (records are automatically recovered in case of failure) + stub.Commit( + CommitRequest( + request_header=request_header, + transaction_id=transaction_id, + ) + ) + except Exception as e: + # Rollback the transaction + stub.Rollback( + RollbackRequest( + request_header=request_header, + transaction_id=transaction_id, + ) + ) + raise e + + +def get_balance(id: str) -> Optional[int]: + with grpc.insecure_channel(SERVER_ADDRESS) as channel: + stub = scalardb_cluster_pb2_grpc.DistributedTransactionStub(channel) + + begin_response: BeginResponse = stub.Begin( + BeginRequest(request_header=request_header) + ) + + transaction_id = begin_response.transaction_id + + try: + # Retrieve the current balance for id + get = Get( + namespace_name=NAMESPACE, + table_name=TABLENAME, + get_type=Get.GetType.GET_TYPE_GET, + partition_key=Key( + columns=[ + Column( + name=ID, + text_value=Column.TextValue(value=id), + ) + ] + ), + ) + get_response: GetResponse = stub.Get( + GetRequest( + request_header=request_header, + transaction_id=transaction_id, + get=get, + ) + ) + + balance = None + if get_response.result.columns: + balance_column = next( + c for c in get_response.result.columns if c.name == BALANCE + ) + balance = balance_column.int_value.value + + # Commit the transaction + stub.Commit( + CommitRequest( + request_header=request_header, + transaction_id=transaction_id, + ) + ) + + return balance + + except Exception as e: + # Rollback the transaction + stub.Rollback( + RollbackRequest( + request_header=request_header, + transaction_id=transaction_id, + ) + ) + raise e + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + subparsers = parser.add_subparsers(required=True) + + parser_charge = subparsers.add_parser("charge") + parser_charge.add_argument("-amount", type=int, required=True) + parser_charge.add_argument("-to", 
type=str, required=True, dest="to_id") + parser_charge.set_defaults(func=lambda args: charge(args.to_id, args.amount)) + + parser_pay = subparsers.add_parser("pay") + parser_pay.add_argument("-amount", type=int, required=True) + parser_pay.add_argument("-from", type=str, required=True, dest="from_id") + parser_pay.add_argument("-to", type=str, required=True, dest="to_id") + parser_pay.set_defaults( + func=lambda args: pay(args.from_id, args.to_id, args.amount) + ) + + parser_get_balance = subparsers.add_parser("get-balance") + parser_get_balance.add_argument("-id", type=str, required=True) + parser_get_balance.set_defaults(func=lambda args: print(get_balance(args.id))) + + args = parser.parse_args() + args.func(args) + +``` + +You can then run the program as follows: + +- Charge `1000` to `user1`: + + ```console + $ python electronic_money.py charge -amount 1000 -to user1 + ``` + +- Charge `0` to `merchant1` (Just create an account for `merchant1`): + + ```console + $ python electronic_money.py charge -amount 0 -to merchant1 + ``` + +- Pay `100` from `user1` to `merchant1`: + + ```console + $ python electronic_money.py pay -amount 100 -from user1 -to merchant1 + ``` + +- Get the balance of `user1`: + + ```console + $ python electronic_money.py get-balance -id user1 + ``` + +- Get the balance of `merchant1`: + + ```console + $ python electronic_money.py get-balance -id merchant1 + ``` + +## References + +- [Getting Started with ScalarDB Cluster](getting-started-with-scalardb-cluster.md) +- [Set Up ScalarDB Cluster on Kubernetes by Using a Helm Chart](setup-scalardb-cluster-on-kubernetes-by-using-helm-chart.md) +- [ScalarDB Cluster gRPC API Guide](scalardb-cluster-grpc-api-guide.md) diff --git a/docs/3.12/scalardb-cluster/images/direct-kubernetes-client-mode.png b/docs/3.12/scalardb-cluster/images/direct-kubernetes-client-mode.png new file mode 100644 index 00000000..13df52e6 Binary files /dev/null and b/docs/3.12/scalardb-cluster/images/direct-kubernetes-client-mode.png differ diff --git a/docs/3.12/scalardb-cluster/images/indirect-client-mode.png b/docs/3.12/scalardb-cluster/images/indirect-client-mode.png new file mode 100644 index 00000000..4e96108f Binary files /dev/null and b/docs/3.12/scalardb-cluster/images/indirect-client-mode.png differ diff --git a/docs/3.12/scalardb-cluster/images/scalardb-cluster-architecture.png b/docs/3.12/scalardb-cluster/images/scalardb-cluster-architecture.png new file mode 100644 index 00000000..24a0a50d Binary files /dev/null and b/docs/3.12/scalardb-cluster/images/scalardb-cluster-architecture.png differ diff --git a/docs/3.12/scalardb-cluster/index.md b/docs/3.12/scalardb-cluster/index.md new file mode 100644 index 00000000..18eba5e3 --- /dev/null +++ b/docs/3.12/scalardb-cluster/index.md @@ -0,0 +1,63 @@ +# ScalarDB Cluster + +ScalarDB Cluster is a clustering solution for [ScalarDB](https://github.com/scalar-labs/scalardb) that consists of a set of cluster nodes, each of which provides ScalarDB functionality. Each cluster node has a routing mechanism that directs transaction requests to the appropriate cluster node within the cluster. + +## Why ScalarDB Cluster? + +When executing a transaction that spans multiple client requests, such as in microservice transactions, all requests for the transaction must be processed on the same server due to the stateful nature of transaction processing. 
However, in a distributed environment, routing requests to the same server isn't straightforward because a service typically runs on multiple servers (or hosts) for scalability and availability. In this scenario, all requests within a transaction must be routed to the same server, while different transactions should be distributed to ensure load balancing. + +To address this challenge, a routing mechanism such as session affinity (also known as sticky sessions) needs to be configured. This strategy ensures that requests within a transaction are consistently routed to the same server. Alternatively, you can leverage a bidirectional-streaming RPC by using gRPC. However, it's important to note that implementing these configurations typically requires significant time and effort. In addition, specific configuration adjustments may be required depending on the load balancer product you are using. + +For more details on this topic, see [Request routing in transactions with a two-phase commit interface](https://github.com/scalar-labs/scalardb/blob/master/docs/two-phase-commit-transactions.md#request-routing-in-transactions-with-a-two-phase-commit-interface). + +ScalarDB Cluster addresses this issue by providing a routing mechanism capable of directing requests to the appropriate cluster node within the cluster. Thus, when a cluster node receives a request, the node can route that request to the correct cluster node in the cluster. + +## Architecture + +ScalarDB Cluster consists of a set of cluster nodes, each equipped with ScalarDB functionality. By using this solution, each cluster node can execute transactions independently. + +A notable feature of ScalarDB Cluster is the distribution of transaction requests by using a routing mechanism. When a cluster node receives a request, the node determines whether it's the appropriate cluster node to process the request. If it's not the appropriate node, the node routes the request to the appropriate cluster node within the cluster. To determine the appropriate cluster node, ScalarDB Cluster uses a consistent hashing algorithm. + +Membership management plays a critical role in ScalarDB Cluster. When a cluster node either joins or leaves the cluster, the configuration of the cluster is automatically adjusted to reflect this change. ScalarDB Cluster currently retrieves membership information by using the Kubernetes API. + +{% capture notice--info %} +**Note** + +Currently, ScalarDB Cluster supports running on Kubernetes only. +{% endcapture %} + +
{{ notice--info | markdownify }}
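To make the routing idea concrete, the following is a minimal, conceptual sketch of consistent hashing in Python. This is not ScalarDB Cluster's actual implementation; it only illustrates how any node can deterministically map a transaction to the node that should process it, and why the mapping changes only minimally when nodes join or leave.

```python
import hashlib
from bisect import bisect_right


class HashRing:
    """A toy hash ring used only to illustrate consistent hashing."""

    def __init__(self, nodes, virtual_nodes=100):
        # Place several virtual points per node on the ring to smooth the distribution.
        self._ring = sorted(
            (self._hash("{}#{}".format(node, i)), node)
            for node in nodes
            for i in range(virtual_nodes)
        )
        self._hashes = [h for h, _ in self._ring]

    @staticmethod
    def _hash(key):
        return int(hashlib.md5(key.encode()).hexdigest(), 16)

    def node_for(self, transaction_id):
        # Walk clockwise from the key's position to the next virtual point on the ring.
        index = bisect_right(self._hashes, self._hash(transaction_id)) % len(self._hashes)
        return self._ring[index][1]


ring = HashRing(["node-1", "node-2", "node-3"])
print(ring.node_for("some-transaction-id"))  # The same transaction ID always maps to the same node.
```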
+ +![ScalarDB Cluster architecture](images/scalardb-cluster-architecture.png) + +## Getting started + +Before you start the tutorials, you need to set up ScalarDB Cluster. To set up ScalarDB Cluster, see [Set Up ScalarDB Cluster on Kubernetes by Using a Helm Chart](setup-scalardb-cluster-on-kubernetes-by-using-helm-chart.md). + +For tutorials on getting started with ScalarDB Cluster, see the following: + +* [Getting Started with ScalarDB Cluster](getting-started-with-scalardb-cluster.md) +* [Getting Started with ScalarDB Cluster GraphQL](getting-started-with-scalardb-cluster-graphql.md) +* [Getting Started with ScalarDB Cluster SQL via JDBC](getting-started-with-scalardb-cluster-sql-jdbc.md) +* [Getting Started with ScalarDB Cluster SQL via Spring Data JDBC for ScalarDB](getting-started-with-scalardb-cluster-sql-spring-data-jdbc.md) + +## References + +For details about the ScalarDB Cluster Helm Chart, refer to the following: + +* [ScalarDB Cluster Helm Chart](https://github.com/scalar-labs/helm-charts/tree/main/charts/scalardb-cluster) +* [Deploy Scalar products using Scalar Helm Charts](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalar-products.md) +* [How to deploy ScalarDB Cluster](https://github.com/scalar-labs/helm-charts/blob/main/docs/how-to-deploy-scalardb-cluster.md) + +For details about the configurations for ScalarDB Cluster, refer to the following: + +* [ScalarDB Cluster Configurations](scalardb-cluster-configurations.md) + +For details about developing applications that use ScalarDB Cluster with the Java API, refer to the following: + +* [Developer Guide for ScalarDB Cluster with the Java API](developer-guide-for-scalardb-cluster-with-java-api.md) + +For details about the ScalarDB Cluster gRPC API, refer to the following: + +* [ScalarDB Cluster gRPC API Guide](scalardb-cluster-grpc-api-guide.md) +* [ScalarDB Cluster SQL gRPC API Guide](scalardb-cluster-sql-grpc-api-guide.md) diff --git a/docs/3.12/scalardb-cluster/scalardb-auth-with-sql.md b/docs/3.12/scalardb-cluster/scalardb-auth-with-sql.md new file mode 100644 index 00000000..9a493b1c --- /dev/null +++ b/docs/3.12/scalardb-cluster/scalardb-auth-with-sql.md @@ -0,0 +1,181 @@ +# ScalarDB Auth with ScalarDB SQL + +ScalarDB Auth is an authentication and authorization mechanism for ScalarDB. + +This document describes how to use ScalarDB Auth with ScalarDB SQL. + +## ScalarDB Auth Overview + +By using ScalarDB Auth, you can create users and grant or revoke their privileges. You can create a user by using the `CREATE USER` command, and you can grant or revoke one's privileges on a table or a namespace by using the `GRANT` or `REVOKE` command, respectively. For details about such data control language (DCL) commands, see [DCL](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/grammar.md#dcl). + +Users can log in to ScalarDB Cluster with a username and a password and execute SQL statements if they have the required privileges. + +ScalarDB Auth supports two types of users: + +- **Superusers:** This type of user has all privileges. Only superusers can create or drop other users and namespaces. +- **Normal users:** This type of user initially doesn't have any privileges, so they need to be granted privileges by a superuser or another user who has the `GRANT` privilege. 
+ +In ScalarDB Auth, the following privileges are available: + +- `SELECT` +- `INSERT` +- `UPDATE` +- `DELETE` +- `CREATE` +- `DROP` +- `TRUNCATE` +- `ALTER` +- `GRANT` + +For details about privileges, see [Which privileges are required for each type of operation](#which-privileges-are-required-for-each-type-of-operation). + +## Configurations + +This section describes the available configurations for ScalarDB Auth. + +### ScalarDB Cluster node configurations + +To enable ScalarDB Auth, you need to set `scalar.db.cluster.auth.enabled` to `true`. + +| Name | Description | Default | +|----------------------------------|------------------------------------|---------| +| `scalar.db.cluster.auth.enabled` | Whether ScalarDB Auth is enabled. | `false` | + +You can also set the following configurations: + +| Name | Description | Default | +|----------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------|--------------------| +| `scalar.db.cluster.auth.cache_expiration_time_millis` | Cache expiration time for auth information in milliseconds. | `60000` (1 minute) | +| `scalar.db.cluster.auth.auth_token_expiration_time_minutes` | Auth token expiration time in minutes. | `1440` (1 day) | +| `scalar.db.cluster.auth.auth_token_gc_thread_interval_minutes` | Auth token garbage collection (GC) thread interval in minutes. | `360` (6 hours) | +| `scalar.db.cluster.auth.pepper` | A secret value added to a password before hashing. If not specified, A password is hashed without pepper. | | + +{% capture notice--info %} +**Note** + +If you enable ScalarDB Auth, you will also need to set `scalar.db.cross_partition_scan.enabled` to `true` for the system namespace (`scalardb` by default) because ScalarDB Auth performs cross-partition scans internally. + +{% endcapture %} + +
{{ notice--info | markdownify }}
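As an illustration, a node-side configuration that turns on ScalarDB Auth might contain the following entries. The pepper value is only a placeholder, and the snippet shows just the auth-related settings rather than a complete node configuration:

```properties
# Enable ScalarDB Auth on this cluster node.
scalar.db.cluster.auth.enabled=true

# Optional: a secret value added to passwords before hashing (placeholder value).
scalar.db.cluster.auth.pepper=<your-secret-pepper>

# Required when ScalarDB Auth is enabled, as described in the note above.
scalar.db.cross_partition_scan.enabled=true
```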
+ +### ScalarDB Cluster Java client SDK configurations + +To enable ScalarDB Auth on the client side, you need to set `scalar.db.cluster.auth.enabled` to `true`. + +| Name | Description | Default | +|----------------------------------|-----------------------------------|---------| +| `scalar.db.cluster.auth.enabled` | Whether ScalarDB Auth is enabled. | `false` | + +In addition to the configuration in the [ScalarDB Cluster SQL client configurations](developer-guide-for-scalardb-cluster-with-java-api.md#scalardb-cluster-sql-client-configurations) section, you also need to set `scalar.db.sql.cluster_mode.username` and `scalar.db.sql.cluster_mode.password` to specify the username and password of the client. + +| Name | Description | Default | +|---------------------------------------|-----------------------------|---------| +| `scalar.db.sql.cluster_mode.username` | The username of the client. | | +| `scalar.db.sql.cluster_mode.password` | The password of the client. | | + +## Initial user + +When you enable ScalarDB Auth, the initial user `admin` is created and the initial password of that user is `admin`. This user is a superuser and has all privileges. You can log in with this user and create other users if necessary. + +{% capture notice--warning %} +**ATTENTION** + +For security purposes, be sure to change the password of the initial user, especially before deploying to a production environment. + +{% endcapture %} + +
{{ notice--warning | markdownify }}
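As an illustration, a client-side configuration for a first login as the initial user might look like the following sketch. The default credentials are shown only for initial setup, and the other cluster SQL client settings described in the linked section are omitted here:

```properties
# Enable ScalarDB Auth on the client side.
scalar.db.cluster.auth.enabled=true

# Credentials of the initial superuser. Be sure to change this password afterward.
scalar.db.sql.cluster_mode.username=admin
scalar.db.sql.cluster_mode.password=admin
```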
+ +## Which privileges are required for each type of operation + +The following tables show which privileges are required for each type of operation: + +### DDL + +| Command | Superuser required | Required privileges | +|-------------------------------|--------------------|---------------------| +| `CREATE NAMESPACE` | `true` | | +| `DROP NAMESPACE` | `true` | | +| `CREATE TABLE` | | `CREATE` | +| `DROP TABLE` | | `DROP` | +| `CREATE INDEX` | | `CREATE` | +| `DROP INDEX` | | `DROP` | +| `TRUNCATE TABLE` | | `TRUNCATE` | +| `ALTER TABLE` | | `ALTER` | +| `CREATE COORDINATOR TABLES` | `true` | | +| `DROP COORDINATOR TABLES` | `true` | | +| `TRUNCATE COORDINATOR TABLES` | `true` | | + +### DML + +| Command | Superuser required | Required privileges | +|------------------|--------------------|-----------------------| +| `SELECT` | | `SELECT` | +| `INSERT` | | `INSERT` | +| `UPSERT` | | `INSERT` | +| `UPDATE` | | `SELECT` and `UPDATE` | +| `DELETE` | | `SELECT` and `DELETE` | + +### DCL + +| Command | Superuser required | Required privileges | +|---------------|-----------------------------------------------|----------------------------------------------------------------| +| `CREATE USER` | `true` | | +| `ALTER USER` | `true` (Users can change their own password.) | | +| `DROP USER` | `true` | | +| `GRANT` | | `GRANT` (Users can grant only the privileges that they have.) | +| `REVOKE` | | `GRANT` (Users can revoke only the privileges that they have.) | + +## Wire encryption + +ScalarDB Cluster also supports wire encryption by using Transport Layer Security (TLS). If you enable ScalarDB Auth, enabling wire encryption in production environments to protect the user credentials is strongly recommended. + +This wire encryption feature encrypts: + +* The communications between the ScalarDB Cluster node and clients. +* The communications between all ScalarDB Cluster nodes (the cluster's internal communications). + +This feature uses gRPC's TLS support. For details, see the official gRPC [Security Policy](https://github.com/grpc/grpc-java/blob/master/SECURITY.md). + +### Configurations + +This section describes the available configurations for wire encryption. + +#### ScalarDB Cluster node configurations + +To enable wire encryption, you need to set `scalar.db.cluster.tls.enabled` to `true`. + +| Name | Description | Default | +|---------------------------------|-------------------------------------------|---------| +| `scalar.db.cluster.tls.enabled` | Whether wire encryption (TLS) is enabled. | `false` | + +You also need to set the following configurations: + +| Name | Description | Default | +|-----------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------| +| `scalar.db.cluster.tls.ca_root_cert_pem` | The custom CA root certificate (PEM data) for TLS communication. | | +| `scalar.db.cluster.tls.ca_root_cert_path` | The custom CA root certificate (file path) for TLS communication. | | +| `scalar.db.cluster.tls.override_authority` | The custom authority for TLS communication. This doesn't change what host is actually connected. This is intended for testing, but may safely be used outside of tests as an alternative to DNS overrides. 
For example, you can specify the hostname presented in the certificate chain file that you set for `scalar.db.cluster.node.tls.cert_chain_path`. | | +| `scalar.db.cluster.node.tls.cert_chain_path` | The certificate chain file used for TLS communication. | | +| `scalar.db.cluster.node.tls.private_key_path` | The private key file used for TLS communication. | | + +To specify the certificate authority (CA) root certificate, you should set either `scalar.db.cluster.tls.ca_root_cert_pem` or `scalar.db.cluster.tls.ca_root_cert_path`. If you set both, `scalar.db.cluster.tls.ca_root_cert_pem` will be used. + +#### ScalarDB Cluster Java client SDK configurations + +To enable wire encryption on the client side, you need to set `scalar.db.cluster.tls.enabled` to `true`. + +| Name | Description | Default | +|---------------------------------|-------------------------------------------|---------| +| `scalar.db.cluster.tls.enabled` | Whether wire encryption (TLS) is enabled. | `false` | + +You also need to set the following configurations: + +| Name | Description | Default | +|-----------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------| +| `scalar.db.cluster.tls.ca_root_cert_pem` | The custom CA root certificate (PEM data) for TLS communication. | | +| `scalar.db.cluster.tls.ca_root_cert_path` | The custom CA root certificate (file path) for TLS communication. | | +| `scalar.db.cluster.tls.override_authority` | The custom authority for TLS communication. This doesn't change what host is actually connected. This is intended for testing, but may safely be used outside of tests as an alternative to DNS overrides. For example, you can specify the hostname presented in the certificate chain file that you set for `scalar.db.cluster.node.tls.cert_chain_path`. | | + +To specify the CA root certificate, you should set either `scalar.db.cluster.tls.ca_root_cert_pem` or `scalar.db.cluster.tls.ca_root_cert_path`. If you set both, `scalar.db.cluster.tls.ca_root_cert_pem` will be used. diff --git a/docs/3.12/scalardb-cluster/scalardb-cluster-configurations.md b/docs/3.12/scalardb-cluster/scalardb-cluster-configurations.md new file mode 100644 index 00000000..eb7aaba6 --- /dev/null +++ b/docs/3.12/scalardb-cluster/scalardb-cluster-configurations.md @@ -0,0 +1,46 @@ +# ScalarDB Cluster Configurations + +This document describes the configurations for ScalarDB Cluster. +ScalarDB Cluster consists of multiple cluster nodes, and you need to configure each cluster node. + +In addition to the configurations described in [Transaction manager configurations](https://github.com/scalar-labs/scalardb/blob/master/docs/configurations.md#transaction-manager-configurations) and [Other configurations](https://github.com/scalar-labs/scalardb/blob/master/docs/configurations.md#other-configurations), you can configure the following configurations for each cluster node. 
+ +## Basic configurations + +The basic configurations for a cluster node are as follows: + +| Name | Description | Default | +|-------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------| +| `scalar.db.cluster.membership.type` | Membership type. Currently, only `KUBERNETES` can be specified. | `KUBERNETES` | +| `scalar.db.cluster.membership.kubernetes.endpoint.namespace_name` | This configuration is for the `KUBERNETES` membership type. Namespace name for the [endpoint resource](https://kubernetes.io/docs/concepts/services-networking/service/#endpoints). | `default` | +| `scalar.db.cluster.membership.kubernetes.endpoint.name` | This configuration is for the `KUBERNETES` membership type. Name of the [endpoint resource](https://kubernetes.io/docs/concepts/services-networking/service/#endpoints) to get the membership info. | | +| `scalar.db.cluster.node.decommissioning_duration_secs` | Decommissioning duration in seconds. | `30` | +| `scalar.db.cluster.node.grpc.max_inbound_message_size` | Maximum message size allowed to be received. | The gRPC default value | +| `scalar.db.cluster.node.grpc.max_inbound_metadata_size` | Maximum size of metadata allowed to be received. | The gRPC default value | +| `scalar.db.cluster.node.port` | Port number of the ScalarDB Cluster node. | `60053` | +| `scalar.db.cluster.node.prometheus_exporter_port` | Port number of the Prometheus exporter. | `9080` | +| `scalar.db.cluster.grpc.deadline_duration_millis` | Deadline duration for gRPC in milliseconds. | `60000` (60 seconds) | +| `scalar.db.cluster.node.standalone_mode.enabled` | Whether standalone mode is enabled. Note that if standalone mode is enabled, the membership configurations (`scalar.db.cluster.membership.*`) will be ignored. | `false` | + +## ScalarDB Cluster GraphQL Configurations + +The configurations for ScalarDB Cluster GraphQL are as follows: + +| Name | Description | Default | +|-----------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------|----------------------| +| `scalar.db.graphql.enabled` | Whether ScalarDB Cluster GraphQL is enabled. | `false` | +| `scalar.db.graphql.port` | Port number of the GraphQL server. | `8080` | +| `scalar.db.graphql.path` | Path component of the URL of the GraphQL endpoint. | `/graphql` | +| `scalar.db.graphql.namespaces` | Comma-separated list of namespaces of tables for which the GraphQL server generates a schema. Note that at least one namespace is required. | | +| `scalar.db.graphql.graphiql` | Whether the GraphQL server serves [GraphiQL](https://github.com/graphql/graphiql) IDE. | `true` | +| `scalar.db.graphql.schema_checking_interval_millis` | Interval in milliseconds at which GraphQL server will rebuild the GraphQL schema if any change is detected in the ScalarDB schema. | `30000` (30 seconds) | + +## ScalarDB Cluster SQL Configurations + +The configurations for ScalarDB Cluster SQL are as follows: + +| Name | Description | Default | +|-----------------------------------------|------------------------------------------|---------| +| `scalar.db.sql.enabled` | Whether ScalarDB Cluster SQL is enabled. | `false` | +| `scalar.db.sql.statement_cache.enabled` | Enable the statement cache. 
| `false` | +| `scalar.db.sql.statement_cache.size` | Maximum number of cached statements. | `100` | diff --git a/docs/3.12/scalardb-cluster/scalardb-cluster-grpc-api-guide.md b/docs/3.12/scalardb-cluster/scalardb-cluster-grpc-api-guide.md new file mode 100644 index 00000000..c285ff3e --- /dev/null +++ b/docs/3.12/scalardb-cluster/scalardb-cluster-grpc-api-guide.md @@ -0,0 +1,225 @@ +# ScalarDB Cluster gRPC API Guide + +This document describes the ScalarDB Cluster gRPC API. + +ScalarDB Cluster provides a Java API that uses the gRPC API internally. +If you use Java or a JVM language, you can use the Java API instead of the ScalarDB Cluster gRPC API directly. +For details about the Java API, see [Developer Guide for ScalarDB Cluster with the Java API](developer-guide-for-scalardb-cluster-with-java-api.md). + +For details about the services and messages for the ScalarDB Cluster gRPC API, see the definitions in [scalardb-cluster.proto](../rpc/src/main/proto/scalardb-cluster.proto). + +ScalarDB Cluster gRPC API is composed of the following services: + +- `scalardb.cluster.rpc.v1.DistributedTransaction`: Provides a distributed transaction capability for ScalarDB Cluster. +- `scalardb.cluster.rpc.v1.TwoPhaseCommitTransaction`: Provides a two-phase commit transaction capability for ScalarDB Cluster. +- `scalardb.cluster.rpc.v1.DistributedTransactionAdmin`: Provides comprehensive administrative operations. + +The following sections describe how to use each service. + +## Overview of error handling in ScalarDB Cluster gRPC API + +Before describing how to use each service, this section explains how error handling works in ScalarDB Cluster gRPC API. + +ScalarDB Cluster gRPC API employs [Richer error model](https://grpc.io/docs/guides/error/#richer-error-model) for error handling. +This model enables servers to return and enables clients to consume additional error details expressed as one or more protobuf messages. +ScalarDB Cluster gRPC API uses `google.rpc.ErrorInfo`, which is one of the [standard set of error message types](https://github.com/googleapis/googleapis/blob/master/google/rpc/error_details.proto), and puts additional error details in `ErrorInfo` fields. + +`ErrorInfo` has the following fields: + +- `reason`: A string that provides a short description of the error. The following sections describe the possible values of `reason` in each service. +- `domain`: A string that indicates the error's origin. In ScalarDB Cluster gRPC API, this string is always set to `com.scalar.db.cluster`. +- `metadata`: A map of metadata for the specific error. In ScalarDB Cluster gRPC API, a transaction ID with the `transactionId` key in the map is put if the error is related to a transaction. + +If you encounter an error, you can retrieve `ErrorInfo` from `google.rpc.Status` in the gRPC response, but the method for doing so depends on the programming language. +Please refer to the appropriate documentation to understand how to get `ErrorInfo` in your specific programming language. + +## How to use the `DistributedTransaction` service + +The `DistributedTransaction` service provides the following RPCs: + +- `Begin`: Begins a transaction. +- `Get`: Retrieves a record. +- `Scan`: Scans records. +- `Put`: Puts a record. +- `Delete`: Deletes a record. +- `Mutate` Mutates (puts and deletes) multiple records. +- `Commit`: Commits a transaction. +- `Rollback`: Rolls back a transaction. + +First, you call `Begin` to initiate a transaction. 
+Then, you can call `Get` and `Scan` to read records, `Put` and `Mutate` to write records, and `Delete` and `Mutate` to delete records. +To finalize the transaction, call `Commit`. +Alternatively, you can call `Rollback` at any time before the transaction is committed to cancel it. +By calling `Begin`, you receive a transaction ID in the response, which you can then use to call `Get`, `Scan`, `Put`, `Delete`, `Mutate`, `Commit`, and `Rollback`. + +When you call `Begin`, you can optionally specify a transaction ID. +If you specify a transaction ID, the user is responsible for guaranteeing the uniqueness of the ID. +If you do not specify a transaction ID, ScalarDB Cluster will generate a transaction ID for the transaction. + +You need to set `RequestHeader` for each RPC request. +`RequestHeader` contains a `hop_limit` field, which restricts the number of hops for a request. +The purpose of the `hop_limit` is to prevent infinite loops within the cluster. +Each time a request is forwarded to another cluster node, the `hop_limit` decreases by one. +If the `hop_limit` reaches zero, the request will be rejected. + +### Error handling + +The table below shows the status code and the possible values of `reason` in `ErrorInfo` in each RPC in the `DistributedTransaction` service: + +| RPC | Status code | `reason` in `ErrorInfo` | Description | +|--------------------------------|---------------------|----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Begin | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Begin | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Begin | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. This occurs when the routing information between cluster nodes is inconsistent. The error is usually resolved in a short amount of time, so you can retry the transaction from the beginning after some time has passed since encountering this error. | +| Begin | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | +| Get, Scan, Put, Delete, Mutate | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Get, Scan, Put, Delete, Mutate | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Get, Scan, Put, Delete, Mutate | NOT_FOUND | TRANSACTION_NOT_FOUND | The transaction associated with the specified transaction ID was not found. This error indicates that the transaction has expired or the routing information has been updated due to cluster topology changes. In this case, please retry the transaction from the beginning. | +| Get, Scan, Put, Delete, Mutate | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. This occurs when the routing information between cluster nodes is inconsistent. 
The error is usually resolved in a short amount of time, so you can retry the transaction from the beginning after some time has passed since encountering this error. | +| Get, Scan, Put, Delete, Mutate | FAILED_PRECONDITION | TRANSACTION_CONFLICT | A transaction conflict occurred. If you encounter this error, please retry the transaction from the beginning. | +| Get, Scan, Put, Delete, Mutate | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | +| Put, Delete, Mutate | FAILED_PRECONDITION | UNSATISFIED_CONDITION | The mutation condition is not satisfied. | +| Commit | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Commit | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Commit | NOT_FOUND | TRANSACTION_NOT_FOUND | The transaction associated with the specified transaction ID was not found. This error indicates that the transaction has expired or the routing information has been updated due to cluster topology changes. In this case, please retry the transaction from the beginning. | +| Commit | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. This occurs when the routing information between cluster nodes is inconsistent. The error is usually resolved in a short amount of time, so you can retry the transaction from the beginning after some time has passed since encountering this error. | +| Commit | FAILED_PRECONDITION | TRANSACTION_CONFLICT | A transaction conflict occurred. If you encounter this error, please retry the transaction from the beginning. | +| Commit | INTERNAL | UNKNOWN_TRANSACTION_STATUS | The status of the transaction is unknown (it is uncertain whether the transaction was successfully committed or not). In this situation, you need to check whether the transaction was successfully committed, and if not, to retry it. The responsibility for determining the transaction status rests with the users. It may be beneficial to create a transaction status table and update it in conjunction with other application data so that you can determine the status of a transaction from the table itself. | +| Commit | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | +| Rollback | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Rollback | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Rollback | NOT_FOUND | TRANSACTION_NOT_FOUND | The transaction associated with the specified transaction ID was not found. In case of a rollback, you do not need to retry the transaction because the transaction will expire automatically. | +| Rollback | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. In case of a rollback, you do not need to retry the transaction because the transaction will expire automatically. | +| Rollback | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | + +If you encounter an error, you should roll back the transaction, except in the case of `Begin`. 
+Then, you can retry the transaction from the beginning for the errors that can be resolved by retrying. + +Besides the errors listed above, you may encounter errors returned by the gRPC library. +In these cases, the response will not contain `ErrorInfo`. +For details, refer to the [gRPC documentation](https://grpc.io/docs/guides/error/#error-status-codes). + +You can set a deadline for each RPC in gRPC. +If the deadline is exceeded, you will receive a `DEADLINE_EXCEEDED` error. +In general, you should roll back the transaction in this situation, unless the RPC is `Begin` or `Commit`. +In the case of `Commit`, the situation is equivalent to `UNKNOWN_TRANSACTION_STATUS` (it is uncertain whether the transaction was successfully committed or not), and you must handle the error in the same way. + +## How to use the `TwoPhaseCommitTransaction` service + +The `TwoPhaseCommitTransaction` service provides the following RPCs: + +- `Begin`: Begins a transaction. +- `Join`: Joins a transaction. +- `Get`: Retrieves a record. +- `Scan`: Scans records. +- `Put`: Puts a record. +- `Delete`: Deletes a record. +- `Mutate`: Mutates (puts and deletes) multiple records. +- `Prepare`: Prepares a transaction. +- `Validate`: Validates a transaction. +- `Commit`: Commits a transaction. +- `Rollback`: Rolls back a transaction. + +First, you call `Begin` to initiate a transaction if you are the coordinator process. +Alternatively, if you are a participant process, you can call `Join` to take part in a transaction that the coordinator has already begun. +Then, you can call `Get` and `Scan` to read records, `Put` and `Mutate` to write records, and `Delete` and `Mutate` to delete records. +To finalize the transaction, call `Prepare`, `Validate`, and then `Commit` in order. +Alternatively, you can call `Rollback` at any time before the transaction is committed to cancel it. +By calling `Begin` or `Join`, you receive a transaction ID in the response, which you can then use to call `Get`, `Scan`, `Put`, `Delete`, `Mutate`, `Prepare`, `Validate`, `Commit`, and `Rollback`. + +When you call `Begin`, you can optionally specify a transaction ID. +If you specify a transaction ID, the user is responsible for guaranteeing the uniqueness of the ID. +If you do not specify a transaction ID, ScalarDB Cluster will generate a transaction ID for the transaction. + +You need to set `RequestHeader` for each RPC request. +`RequestHeader` contains a `hop_limit` field, which restricts the number of hops for a request. +The purpose of the `hop_limit` is to prevent infinite loops within the cluster. +Each time a request is forwarded to another cluster node, the `hop_limit` decreases by one. +If the `hop_limit` reaches zero, the request will be rejected. 
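To make the call sequence concrete, the following Python sketch outlines the coordinator-side flow, using stub code generated from `scalardb-cluster.proto` with `grpcio-tools`. The `TwoPhaseCommitTransactionStub` class name and the `PrepareRequest` and `ValidateRequest` message names are assumptions inferred from the service and RPC names and from the message names used by the `DistributedTransaction` service; check the generated `scalardb_cluster_pb2*.py` files for the actual definitions.

```python
import grpc

import scalardb_cluster_pb2_grpc
from scalardb_cluster_pb2 import (  # Message names are assumptions; verify them against the proto file.
    BeginRequest,
    CommitRequest,
    PrepareRequest,
    RequestHeader,
    RollbackRequest,
    ValidateRequest,
)

request_header = RequestHeader(hop_limit=10)

with grpc.insecure_channel("localhost:60053") as channel:
    # The stub class name is assumed from the service name `TwoPhaseCommitTransaction`.
    stub = scalardb_cluster_pb2_grpc.TwoPhaseCommitTransactionStub(channel)

    # As the coordinator, begin the transaction. A participant would instead call Join
    # with the transaction ID it received from the coordinator.
    begin_response = stub.Begin(BeginRequest(request_header=request_header))
    transaction_id = begin_response.transaction_id

    try:
        # ... call Get, Scan, Put, Delete, or Mutate here with the same transaction ID ...

        # Finalize the transaction: Prepare, Validate, and then Commit, in that order.
        stub.Prepare(PrepareRequest(request_header=request_header, transaction_id=transaction_id))
        stub.Validate(ValidateRequest(request_header=request_header, transaction_id=transaction_id))
        stub.Commit(CommitRequest(request_header=request_header, transaction_id=transaction_id))
    except Exception:
        # Cancel the transaction if anything fails before the commit completes.
        stub.Rollback(RollbackRequest(request_header=request_header, transaction_id=transaction_id))
        raise
```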
+ +### Error handling + +The table below shows the status code and the possible values of `reason` in `ErrorInfo` in each RPC in the `TwoPhaseCommitTransaction` service: + +| RPC | Status code | `reason` in `ErrorInfo` | Description | +|--------------------------------|---------------------|----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Begin, Join | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Begin, Join | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Begin, Join | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. This occurs when the routing information between cluster nodes is inconsistent. The error is usually resolved in a short amount of time, so you can retry the transaction from the beginning after some time has passed since encountering this error. | +| Begin, Join | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | +| Get, Scan, Put, Delete, Mutate | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Get, Scan, Put, Delete, Mutate | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Get, Scan, Put, Delete, Mutate | NOT_FOUND | TRANSACTION_NOT_FOUND | The transaction associated with the specified transaction ID was not found. This indicates that the transaction has expired or the routing information has been updated due to cluster topology changes. In this case, please retry the transaction from the beginning. | +| Get, Scan, Put, Delete, Mutate | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. This occurs when the routing information between cluster nodes is inconsistent. The error is usually resolved in a short amount of time, so you can retry the transaction from the beginning after some time has passed since encountering this error. | +| Get, Scan, Put, Delete, Mutate | FAILED_PRECONDITION | TRANSACTION_CONFLICT | A transaction conflict occurred. If you encounter this error, please retry the transaction from the beginning. | +| Get, Scan, Put, Delete, Mutate | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | +| Put, Delete, Mutate | FAILED_PRECONDITION | UNSATISFIED_CONDITION | The mutation condition is not satisfied. | +| Prepare, Validate | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Prepare, Validate | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Prepare, Validate | NOT_FOUND | TRANSACTION_NOT_FOUND | The transaction associated with the specified transaction ID was not found. 
This error indicates that the transaction has expired or the routing information has been updated due to cluster topology changes. In this case, please retry the transaction from the beginning. | +| Prepare, Validate | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. This occurs when the routing information between cluster nodes is inconsistent. The error is usually resolved in a short amount of time, so you can retry the transaction from the beginning after some time has passed since encountering this error. | +| Prepare, Validate | FAILED_PRECONDITION | TRANSACTION_CONFLICT | A transaction conflict occurred. If you encounter this error, please retry the transaction from the beginning. | +| Prepare, Validate | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | +| Commit | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Commit | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Commit | NOT_FOUND | TRANSACTION_NOT_FOUND | The transaction associated with the specified transaction ID was not found. This error indicates that the transaction has expired or the routing information has been updated due to cluster topology changes. In this case, please retry the transaction from the beginning. | +| Commit | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. This occurs when the routing information between cluster nodes is inconsistent. The error is usually resolved in a short amount of time, so you can retry the transaction from the beginning after some time has passed since encountering this error. | +| Commit | FAILED_PRECONDITION | TRANSACTION_CONFLICT | A transaction conflict occurred. If you encounter this error, please retry the transaction from the beginning. | +| Commit | INTERNAL | UNKNOWN_TRANSACTION_STATUS | The status of the transaction is unknown (it is uncertain whether the transaction was successfully committed or not). In this situation, you need to check whether the transaction was successfully committed, and if not, to retry it. The responsibility for determining the transaction status rests with the users. It may be beneficial to create a transaction status table and update it in conjunction with other application data so that you can determine the status of a transaction from the table itself. | +| Commit | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | +| Rollback | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Rollback | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Rollback | NOT_FOUND | TRANSACTION_NOT_FOUND | The transaction associated with the specified transaction ID was not found. In case of a rollback, you do not need to retry the transaction because the transaction will expire automatically. | +| Rollback | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. In case of a rollback, you do not need to retry the transaction because the transaction will expire automatically. | +| Rollback | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. 
You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | + +If you encounter an error, you should roll back the transaction, except in the case of `Begin` or `Join`. +Then, you can retry the transaction from the beginning for the errors that can be resolved by retrying. + +Besides the errors listed above, you may encounter errors returned by the gRPC library. +In these cases, the response will not contain `ErrorInfo`. +For details, refer to the [gRPC documentation](https://grpc.io/docs/guides/error/#error-status-codes). + +You can set a deadline for each RPC in gRPC. +If the deadline is exceeded, you will receive a `DEADLINE_EXCEEDED` error. +In general, you should roll back the transaction in this situation, unless the RPC is `Begin`, `Join`, or `Commit`. +In the case of `Commit`, the situation is equivalent to `UNKNOWN_TRANSACTION_STATUS` (it is uncertain whether the transaction was successfully committed or not), and you must handle the error in the same way. + +## How to use the `DistributedTransactionAdmin` service + +The `DistributedTransactionAdmin` service provides the following RPCs: + +- `CreateNamespace`: Creates a namespace. +- `DropNamespace`: Drops a namespace. +- `NamespaceExists`: Returns whether the specified namespace exists or not. +- `CreateTable`: Creates a table. +- `DropTable`: Drops a table. +- `TruncateTable`: Truncates a table. +- `TableExists`: Returns whether the specified table exists or not. +- `CreateIndex`: Creates an index. +- `DropIndex`: Drops an index. +- `IndexExists`: Returns whether the specified index exists or not. +- `RepairTable`: Repairs a namespace that may be in an unknown state. +- `AddNewColumnToTable`: Adds a new column to a table. +- `CreateCoordinatorTables`: Creates the Coordinator tables. +- `DropCoordinatorTables`: Drops the Coordinator tables. +- `TruncateCoordinatorTables`: Truncates the Coordinator tables. +- `CoordinatorTablesExist`: Returns whether the Coordinator tables exist or not. +- `RepairCoordinatorTables`: Repairs the Coordinator tables. +- `GetTableMetadata`: Returns table metadata of the specified table. +- `GetNamespaceTableNames`: Returns tables in the specified namespace. +- `ImportTable`: Imports an existing table that is not managed by ScalarDB. + +### Error handling + +The table below shows the status code and the possible values of `reason` in `ErrorInfo` for all RPCs in the `DistributedTransactionAdmin` service: + +| Status code | `reason` in `ErrorInfo` | Description | +|---------------------|----------------------------|-------------------------------------------------| +| INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| INTERNAL | INTERNAL_ERROR | The operation has failed. | + +Besides the errors listed above, you may encounter errors returned by the gRPC library. +In these cases, the response will not contain `ErrorInfo`. +For details, refer to the [gRPC documentation](https://grpc.io/docs/guides/error/#error-status-codes). diff --git a/docs/3.12/scalardb-cluster/scalardb-cluster-sql-grpc-api-guide.md b/docs/3.12/scalardb-cluster/scalardb-cluster-sql-grpc-api-guide.md new file mode 100644 index 00000000..076b36ad --- /dev/null +++ b/docs/3.12/scalardb-cluster/scalardb-cluster-sql-grpc-api-guide.md @@ -0,0 +1,209 @@ +# ScalarDB Cluster SQL gRPC API Guide + +This document describes the ScalarDB Cluster SQL gRPC API. 
+ +ScalarDB Cluster SQL provides a Java API that uses the gRPC API internally. +If you use Java or a JVM language, you can use the Java API instead of the ScalarDB Cluster SQL gRPC API directly. +For details about the Java API, see [Developer Guide for ScalarDB Cluster with the Java API](developer-guide-for-scalardb-cluster-with-java-api.md). + +For details about the services and messages for the ScalarDB Cluster gRPC API, see the definitions in [scalardb-cluster.proto](../rpc/src/main/proto/scalardb-cluster.proto). + +ScalarDB Cluster SQL gRPC API is composed of the following services: + +- `scalardb.cluster.rpc.v1.sql.SqlTransaction`: Provides a transaction capability for ScalarDB Cluster SQL. +- `scalardb.cluster.rpc.v1.sql.SqlTwoPhaseCommitTransaction`: Provides a two-phase commit transaction capability for ScalarDB Cluster SQL. +- `scalardb.cluster.rpc.v1.sql.Metadata`: Provides a metadata view of ScalarDB Cluster SQL. + +The following sections describe how to use each service. + +## Overview of error handling in ScalarDB Cluster SQL gRPC API + +Before describing how to use each service, this section explains how error handling works in ScalarDB Cluster SQL gRPC API. + +ScalarDB Cluster SQL gRPC API employs [Richer error model](https://grpc.io/docs/guides/error/#richer-error-model) for error handling. +This model enables servers to return and enables clients to consume additional error details expressed as one or more protobuf messages. +ScalarDB Cluster SQL gRPC API uses `google.rpc.ErrorInfo`, which is one of the [standard set of error message types](https://github.com/googleapis/googleapis/blob/master/google/rpc/error_details.proto), and puts additional error details in `ErrorInfo` fields. + +`ErrorInfo` has the following fields: + +- `reason`: A string that provides a short description of the error. The following sections describe the possible values of `reason` in each service. +- `domain`: A string that indicates the error's origin. In ScalarDB Cluster SQL gRPC API, this string is always set to `com.scalar.db.cluster.sql`. +- `metadata`: A map of metadata for the specific error. In ScalarDB Cluster SQL gRPC API, a transaction ID with the `transactionId` key in the map is put if the error is related to a transaction. + +If you encounter an error, you can retrieve `ErrorInfo` from `google.rpc.Status` in the gRPC response, but the method for doing so depends on the programming language. +Please refer to the appropriate documentation to understand how to get `ErrorInfo` in your specific programming language. + +## How to use the `SqlTransaction` service + +The `SqlTransaction` service provides the following RPCs: + +- `Begin`: Begins a transaction. +- `Execute` Executes a SQL statement. +- `Commit`: Commits a transaction. +- `Rollback`: Rolls back a transaction. + +First, you call `Begin` to initiate a transaction. +Following that, you can call `Execute` to read, write, and delete records. +To finalize the transaction, call `Commit`. +Alternatively, you can call `Rollback` at any time before the transaction is committed to cancel it. +By calling `Begin`, you receive a transaction ID in the response, which you can then use to call `Execute`, `Commit`, and `Rollback`. + +Also, you can call `Execute` without a transaction ID to execute a one-shot transaction. +In this case, the transaction is automatically committed after it is executed. +You can use this method to execute DDL statements as well. 
+For details on the supported SQL statements, refer to [ScalarDB SQL Grammar](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/grammar.md). +Please note, however, that `Execute` supports only DML and DDL statements. + +When you call `Begin`, you can optionally specify a transaction ID. +If you specify a transaction ID, the user is responsible for guaranteeing the uniqueness of the ID. +If you do not specify a transaction ID, ScalarDB Cluster will generate a transaction ID for the transaction. + +You need to set `RequestHeader` for each RPC request. +`RequestHeader` contains a `hop_limit` field, which restricts the number of hops for a request. +The purpose of the `hop_limit` is to prevent infinite loops within the cluster. +Each time a request is forwarded to another cluster node, the `hop_limit` decreases by one. +If the `hop_limit` reaches zero, the request will be rejected. + +### Error handling + +The table below shows the status code and the possible values of `reason` in `ErrorInfo` in each RPC in the `SqlTransaction` service: + +| RPC | Status code | `reason` in `ErrorInfo` | Description | +|----------|---------------------|----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Begin | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Begin | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Begin | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. This occurs when the routing information between cluster nodes is inconsistent. The error is usually resolved in a short amount of time, so you can retry the transaction from the beginning after some time has passed since encountering this error. | +| Begin | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | +| Execute | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Execute | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Execute | NOT_FOUND | TRANSACTION_NOT_FOUND | The transaction associated with the specified transaction ID was not found. This error indicates that the transaction has expired or the routing information has been updated due to cluster topology changes. In this case, please retry the transaction from the beginning. | +| Execute | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. This occurs when the routing information between cluster nodes is inconsistent. The error is usually resolved in a short amount of time, so you can retry the transaction from the beginning after some time has passed since encountering this error. | +| Execute | FAILED_PRECONDITION | TRANSACTION_CONFLICT | A transaction conflict occurred. If you encounter this error, please retry the transaction from the beginning. | +| Execute | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. 
You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | +| Commit | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Commit | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Commit | NOT_FOUND | TRANSACTION_NOT_FOUND | The transaction associated with the specified transaction ID was not found. This error indicates that the transaction has expired or the routing information has been updated due to cluster topology changes. In this case, please retry the transaction from the beginning. | +| Commit | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. This occurs when the routing information between cluster nodes is inconsistent. The error is usually resolved in a short amount of time, so you can retry the transaction from the beginning after some time has passed since encountering this error. | +| Commit | FAILED_PRECONDITION | TRANSACTION_CONFLICT | A transaction conflict occurred. If you encounter this error, please retry the transaction from the beginning. | +| Commit | INTERNAL | UNKNOWN_TRANSACTION_STATUS | The status of the transaction is unknown (it is uncertain whether the transaction was successfully committed or not). In this situation, you need to check whether the transaction was successfully committed, and if not, to retry it. The responsibility for determining the transaction status rests with the users. It may be beneficial to create a transaction status table and update it in conjunction with other application data so that you can determine the status of a transaction from the table itself. | +| Commit | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | +| Rollback | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Rollback | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Rollback | NOT_FOUND | TRANSACTION_NOT_FOUND | The transaction associated with the specified transaction ID was not found. In case of a rollback, you do not need to retry the transaction because the transaction will expire automatically. | +| Rollback | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. In case of a rollback, you do not need to retry the transaction because the transaction will expire automatically. | +| Rollback | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | + +If you encounter an error, you should roll back the transaction, except in the case of `Begin`. +Then, you can retry the transaction from the beginning for the errors that can be resolved by retrying. + +Besides the errors listed above, you may encounter errors returned by the gRPC library. +In these cases, the response will not contain `ErrorInfo`. +For details, refer to the [gRPC documentation](https://grpc.io/docs/guides/error/#error-status-codes). + +You can set a deadline for each RPC in gRPC. +If the deadline is exceeded, you will receive a `DEADLINE_EXCEEDED` error. +In general, you should roll back the transaction in this situation, unless the RPC is `Begin` or `Commit`. 
+In the case of `Commit`, the situation is equivalent to `UNKNOWN_TRANSACTION_STATUS` (it is uncertain whether the transaction was successfully committed or not), and you must handle the error in the same way. + +## How to use the `SqlTwoPhaseCommitTransaction` service + +The `SqlTwoPhaseCommitTransaction` service provides the following RPCs: + +- `Begin`: Begins a transaction. +- `Join`: Joins a transaction. +- `Execute` Executes a SQL statement. +- `Prepare`: Prepares a transaction. +- `Validate`: Validates a transaction. +- `Commit`: Commits a transaction. +- `Rollback`: Rolls back a transaction. + +First, you call `Begin` to initiate a transaction if you are the coordinator process. +Alternatively, if you are a participant process, you can call `Join` to take part in a transaction that the coordinator has already begun. +Following that, you can call `Execute` to read, write, and delete records. +To finalize the transaction, call `Prepare`, `Validate`, and then `Commit` in order. +Alternatively, you can call `Rollback` at any time before the transaction is committed to cancel it. +By calling `Begin` or `Join`, you receive a transaction ID in the response, which you can then use to call `Execute`, `Prepare`, `Validate`, `Commit`, and `Rollback`. + +In addition, you can call `Execute` without a transaction ID to execute a one-shot transaction. +In this case, the transaction is automatically committed after it is executed. +You can use this method to execute DDL statements as well. For details on the supported SQL statements, refer to [ScalarDB SQL Grammar](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/grammar.md). +Please note, however, that `Execute` supports only DML and DDL statements. + +When you call `Begin`, you can optionally specify a transaction ID. +If you specify a transaction ID, the user is responsible for guaranteeing the uniqueness of the ID. +If you do not specify a transaction ID, ScalarDB Cluster will generate a transaction ID for the transaction. + +You need to set `RequestHeader` for each RPC request. +`RequestHeader` contains a `hop_limit` field, which restricts the number of hops for a request. +The purpose of the `hop_limit` is to prevent infinite loops within the cluster. +Each time a request is forwarded to another cluster node, the `hop_limit` decreases by one. +If the `hop_limit` reaches zero, the request will be rejected. + +### Error handling + +The table below shows the status code and the possible values of `reason` in `ErrorInfo` in each RPC in the `SqlTwoPhaseCommitTransaction` service: + +| RPC | Status code | `reason` in `ErrorInfo` | Description | +|-------------------|---------------------|----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Begin, Join | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Begin, Join | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Begin, Join | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. 
This occurs when the routing information between cluster nodes is inconsistent. The error is usually resolved in a short amount of time, so you can retry the transaction from the beginning after some time has passed since encountering this error. | +| Begin, Join | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | +| Execute | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Execute | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Execute | NOT_FOUND | TRANSACTION_NOT_FOUND | The transaction associated with the specified transaction ID was not found. This error indicates that the transaction has expired or the routing information has been updated due to cluster topology changes. In this case, please retry the transaction from the beginning. | +| Execute | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. This occurs when the routing information between cluster nodes is inconsistent. The error is usually resolved in a short amount of time, so you can retry the transaction from the beginning after some time has passed since encountering this error. | +| Execute | FAILED_PRECONDITION | TRANSACTION_CONFLICT | A transaction conflict occurred. If you encounter this error, please retry the transaction from the beginning. | +| Execute | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | +| Prepare, Validate | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Prepare, Validate | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Prepare, Validate | NOT_FOUND | TRANSACTION_NOT_FOUND | The transaction associated with the specified transaction ID was not found. This error indicates that the transaction has expired or the routing information has been updated due to cluster topology changes. In this case, please retry the transaction from the beginning. | +| Prepare, Validate | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. This occurs when the routing information between cluster nodes is inconsistent. The error is usually resolved in a short amount of time, so you can retry the transaction from the beginning after some time has passed since encountering this error. | +| Prepare, Validate | FAILED_PRECONDITION | TRANSACTION_CONFLICT | A transaction conflict occurred. If you encounter this error, please retry the transaction from the beginning. | +| Prepare, Validate | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | +| Commit | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Commit | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Commit | NOT_FOUND | TRANSACTION_NOT_FOUND | The transaction associated with the specified transaction ID was not found. This error indicates that the transaction has expired or the routing information has been updated due to cluster topology changes. In this case, please retry the transaction from the beginning. 
| +| Commit | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. This occurs when the routing information between cluster nodes is inconsistent. The error is usually resolved in a short amount of time, so you can retry the transaction from the beginning after some time has passed since encountering this error. | +| Commit | FAILED_PRECONDITION | TRANSACTION_CONFLICT | A transaction conflict occurred. If you encounter this error, please retry the transaction from the beginning. | +| Commit | INTERNAL | UNKNOWN_TRANSACTION_STATUS | The status of the transaction is unknown (it is uncertain whether the transaction was successfully committed or not). In this situation, you need to check whether the transaction was successfully committed, and if not, to retry it. The responsibility for determining the transaction status rests with the users. It may be beneficial to create a transaction status table and update it in conjunction with other application data so that you can determine the status of a transaction from the table itself. | +| Commit | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | +| Rollback | INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| Rollback | FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| Rollback | NOT_FOUND | TRANSACTION_NOT_FOUND | The transaction associated with the specified transaction ID was not found. In case of a rollback, you do not need to retry the transaction because the transaction will expire automatically. | +| Rollback | INTERNAL | HOP_LIMIT_EXCEEDED | The hop limit was exceeded. In case of a rollback, you do not need to retry the transaction because the transaction will expire automatically. | +| Rollback | INTERNAL | INTERNAL_ERROR | The operation has failed due to transient or nontransient faults. You can try retrying the transaction from the beginning, but the transaction may still fail if the cause is nontransient. | + +If you encounter an error, you should roll back the transaction, except in the case of `Begin` or `Join`. +Then, you can retry the transaction from the beginning for the errors that can be resolved by retrying. + +Besides the errors listed above, you may encounter errors returned by the gRPC library. +In these cases, the response will not contain `ErrorInfo`. +For details, refer to the [gRPC documentation](https://grpc.io/docs/guides/error/#error-status-codes). + +You can set a deadline for each RPC in gRPC. +If the deadline is exceeded, you will receive a `DEADLINE_EXCEEDED` error. +In general, you should roll back the transaction in this situation, unless the RPC is `Begin`, `Join`, or `Commit`. +In the case of `Commit`, the situation is equivalent to `UNKNOWN_TRANSACTION_STATUS` (it is uncertain whether the transaction was successfully committed or not), and you must handle the error in the same way. + +## How to use the `Metadata` service + +The `Metadata` service provides the following RPCs: + +- `GetNamespaceMetadata`: Retrieves namespace metadata of the specified namespace. +- `ListTableMetadataInNamespace`: Retrieves table metadata of tables in the specified namespace. +- `GetTableMetadata`: Retrieves table metadata of the specified table. 
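+
+As mentioned in the error-handling overview above, the way to retrieve `ErrorInfo` from a failed RPC depends on your programming language. As a reference for the error-handling tables in this document, the following is a minimal Java sketch that unpacks `ErrorInfo` from a `StatusRuntimeException` by using the standard gRPC Java (`grpc-protobuf`) and `google.rpc` classes. The helper class shown here is illustrative and is not part of ScalarDB Cluster:
+
+```java
+import com.google.protobuf.Any;
+import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.rpc.ErrorInfo;
+import io.grpc.StatusRuntimeException;
+import io.grpc.protobuf.StatusProto;
+
+public class ErrorInfoExtractor {
+  /** Returns the ErrorInfo attached to a failed gRPC call, or null if none is present. */
+  public static ErrorInfo extractErrorInfo(StatusRuntimeException e) {
+    com.google.rpc.Status status = StatusProto.fromThrowable(e);
+    if (status == null) {
+      return null;
+    }
+    for (Any detail : status.getDetailsList()) {
+      if (detail.is(ErrorInfo.class)) {
+        try {
+          // `reason`, `domain`, and `metadata` (for example, the `transactionId` key) are available here.
+          return detail.unpack(ErrorInfo.class);
+        } catch (InvalidProtocolBufferException ignored) {
+          // Keep looking at the remaining details.
+        }
+      }
+    }
+    return null;
+  }
+}
+```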
+ +### Error handling + +The table below shows the status code and the possible values of `reason` in `ErrorInfo` for all RPCs in the `Metadata` service: + +| Status code | `reason` in `ErrorInfo` | Description | +|---------------------|----------------------------|-------------------------------------------------| +| INVALID_ARGUMENT | ILLEGAL_ARGUMENT | The argument in the request message is invalid. | +| FAILED_PRECONDITION | ILLEGAL_STATE | The RPC was called in an invalid state. | +| INTERNAL | INTERNAL_ERROR | The operation has failed. | + +Besides the errors listed above, you may encounter errors returned by the gRPC library. +In these cases, the response will not contain `ErrorInfo`. +For details, refer to the [gRPC documentation](https://grpc.io/docs/guides/error/#error-status-codes). diff --git a/docs/3.12/scalardb-cluster/setup-scalardb-cluster-on-kubernetes-by-using-helm-chart.md b/docs/3.12/scalardb-cluster/setup-scalardb-cluster-on-kubernetes-by-using-helm-chart.md new file mode 100644 index 00000000..cde81bef --- /dev/null +++ b/docs/3.12/scalardb-cluster/setup-scalardb-cluster-on-kubernetes-by-using-helm-chart.md @@ -0,0 +1,211 @@ +# Set Up ScalarDB Cluster on Kubernetes by Using a Helm Chart + +This document provides instructions on how to set up a ScalarDB Cluster by using a Helm Chart on a Kubernetes cluster, specifically designed for a test environment. + +## Prerequisites + +* Kubernetes cluster (such as MiniKube or Kind) +* kubectl +* Helm + +In addition, you need access to the [ScalarDB Cluster Node docker image](https://github.com/orgs/scalar-labs/packages/container/package/scalardb-cluster-node). +This repository is available only to users with a commercial license and permission. +To get a license and permission, please [contact us](https://scalar-labs.com/contact_us/). + +## What you will create + +You will be deploying the following components on a Kubernetes cluster as depicted below: + +``` ++---------------------------------------------------------------------------------------------------------------------------------------+ +| [Kubernetes Cluster] | +| | +| [Pod] [Pod] [Pod] | +| | +| +-------+ | +| +---> | Envoy | ---+ | +| | +-------+ | | +| | | | +| +---------+ | +-------+ | +--------------------+ | +| | Service | ---+---> | Envoy | ---+---------> | Service | ---+ | +| | (Envoy) | | +-------+ | | (ScalarDB Cluster) | | | +| +---------+ | | +--------------------+ | +-----------------------+ | +| | +-------+ | | +---> | ScalarDB Cluster Node | ---+ | +| +---> | Envoy | ---+ | | +-----------------------+ | | +| +-------+ | | | | +| | | +-----------------------+ | +------------+ | +| +---+---> | ScalarDB Cluster Node | ---+---> | PostgreSQL | | +| | | +-----------------------+ | +------------+ | +| | | | | +| | | +-----------------------+ | | +| | +---> | ScalarDB Cluster Node | ---+ | +| | +-----------------------+ | +| +----------------------------+ | | +| | Service | ---+ | +| | (ScalarDB Cluster GraphQL) | | +| +----------------------------+ | +| | ++---------------------------------------------------------------------------------------------------------------------------------------+ +``` + +## Step 1. Start a PostgreSQL container + +ScalarDB Cluster must use some kind of database system as its backend database. +The database that is used in this document is PostgreSQL. + +You can deploy PostgreSQL on the Kubernetes cluster as follows: + +1. 
Add the Bitnami helm repository: + + ```console + helm repo add bitnami https://charts.bitnami.com/bitnami + ``` + +2. Deploy PostgreSQL: + + ```console + helm install postgresql-scalardb-cluster bitnami/postgresql \ + --set auth.postgresPassword=postgres \ + --set primary.persistence.enabled=false + ``` + +3. Check if the PostgreSQL container is running: + + ```console + kubectl get pod + ``` + + Command execution result: + + ```console + NAME READY STATUS RESTARTS AGE + postgresql-scalardb-cluster-0 1/1 Running 0 17s + ``` + +## Step 2. Deploy ScalarDB Cluster on the Kubernetes cluster by using a Helm Chart + +1. Add the Scalar helm repository: + + ```console + helm repo add scalar-labs https://scalar-labs.github.io/helm-charts + ``` + +2. Create a secret resource to pull the ScalarDB Cluster container images from GitHub packages: + + ```console + kubectl create secret docker-registry reg-docker-secrets \ + --docker-server=ghcr.io \ + --docker-username= \ + --docker-password= + ``` + +3. Create a custom values file for ScalarDB Cluster (`scalardb-cluster-custom-values.yaml`): + + ```console + cat << 'EOF' > scalardb-cluster-custom-values.yaml + envoy: + enabled: true + service: + type: "LoadBalancer" + + scalardbCluster: + scalardbClusterNodeProperties: | + # ScalarDB Cluster configurations + scalar.db.cluster.membership.type=KUBERNETES + scalar.db.cluster.membership.kubernetes.endpoint.namespace_name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAMESPACE_NAME} + scalar.db.cluster.membership.kubernetes.endpoint.name=${env:SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAME} + + # Storage configurations + scalar.db.storage=jdbc + scalar.db.contact_points=jdbc:postgresql://postgresql-scalardb-cluster.default.svc.cluster.local:5432/postgres + scalar.db.username=postgres + scalar.db.password=postgres + + # For ScalarDB Cluster GraphQL tutorial. + scalar.db.graphql.enabled=true + scalar.db.graphql.namespaces=emoney + + # For ScalarDB Cluster SQL tutorial. + scalar.db.sql.enabled=true + graphql: + enabled: true + service: + type: "LoadBalancer" + EOF + ``` + + For the tutorials, the service type for ScalarDB Cluster GraphQL and Envoy is set to `LoadBalancer`. + +4. Deploy ScalarDB Cluster: + + ```console + helm install scalardb-cluster scalar-labs/scalardb-cluster -f ./scalardb-cluster-custom-values.yaml + ``` + +5. Check if the ScalarDB Cluster pods are deployed: + + ```console + kubectl get pod + ``` + + Command execution result: + + ```console + NAME READY STATUS RESTARTS AGE + postgresql-scalardb-cluster-0 1/1 Running 0 84s + scalardb-cluster-envoy-59899dc588-477tg 1/1 Running 0 35s + scalardb-cluster-envoy-59899dc588-dpvhx 1/1 Running 0 35s + scalardb-cluster-envoy-59899dc588-lv9hx 1/1 Running 0 35s + scalardb-cluster-node-866c756c79-5v2tk 1/1 Running 0 35s + scalardb-cluster-node-866c756c79-9zhq5 1/1 Running 0 35s + scalardb-cluster-node-866c756c79-t6v86 1/1 Running 0 35s + ``` + + If the ScalarDB Cluster Node Pods and the Envoy Pods are deployed properly, the `STATUS` for each pod will be `Running`. + +6. 
Check if the service resources of the ScalarDB Cluster are deployed: + + ```console + kubectl get svc + ``` + + Command execution result: + + ```console + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + kubernetes ClusterIP 10.96.0.1 443/TCP 260d + postgresql-scalardb-cluster ClusterIP 10.110.97.40 5432/TCP 86s + postgresql-scalardb-cluster-hl ClusterIP None 5432/TCP 86s + scalardb-cluster-envoy LoadBalancer 10.105.121.51 localhost 60053:30641/TCP 49s + scalardb-cluster-envoy-metrics ClusterIP 10.111.131.189 9001/TCP 49s + scalardb-cluster-graphql LoadBalancer 10.105.74.214 localhost 8080:30514/TCP 49s + scalardb-cluster-headless ClusterIP None 60053/TCP 49s + scalardb-cluster-metrics ClusterIP 10.110.132.22 9080/TCP 49s + ``` + + If the service resources of ScalarDB Cluster and Envoy are deployed properly, the private IP addresses in the `CLUSTER-IP` column will be displayed. (**Note:** `scalardb-cluster-headless` has no `CLUSTER-IP` address.) + You can also see `EXTERNAL-IP` addresses assigned to the service resource of ScalarDB Cluster GraphQL (`scalardb-cluster-graphql`) and the service resource of Envoy (`scalardb-cluster-envoy`) as `TYPE` is set to `LoadBalancer`. + + In addition, the access method to the `LoadBalancer` service from your environment depends on each Kubernetes distribution. For example: + + * If you use Minikube, you can use the [`minikube tunnel` command](https://minikube.sigs.k8s.io/docs/commands/tunnel/). + * If you use Kind, you can use [MetalLB](https://kind.sigs.k8s.io/docs/user/loadbalancer/). + + For details on how to access the `LoadBalancer` service, see the official documents of the Kubernetes distribution that you use. + +## Delete all resources + +You can delete all resources created in this document by running the following command: + +```console +helm uninstall scalardb-cluster postgresql-scalardb-cluster +``` + +## Next steps + +To get familiar with other use cases for ScalarDB Cluster, try the following tutorials: + +* [Getting Started with ScalarDB Cluster](getting-started-with-scalardb-cluster.md) +* [Getting Started with ScalarDB Cluster GraphQL](getting-started-with-scalardb-cluster-graphql.md) +* [Getting Started with ScalarDB Cluster SQL via JDBC](getting-started-with-scalardb-cluster-sql-jdbc.md) +* [Getting Started with ScalarDB Cluster SQL via Spring Data JDBC for ScalarDB](getting-started-with-scalardb-cluster-sql-spring-data-jdbc.md) diff --git a/docs/3.12/scalardb-cluster/standalone-mode.md b/docs/3.12/scalardb-cluster/standalone-mode.md new file mode 100644 index 00000000..44a49ea9 --- /dev/null +++ b/docs/3.12/scalardb-cluster/standalone-mode.md @@ -0,0 +1,106 @@ +# ScalarDB Cluster Standalone Mode + +Instead of setting up a Kubernetes cluster and deploying ScalarDB Cluster on top of it by using a Helm Chart, you can run ScalarDB Cluster in standalone mode, which simplifies development and testing processes. A primary use case for this would be when you want to start ScalarDB Cluster in standalone mode via Docker on your local machine and use it for development and testing. 
+ +To run ScalarDB Cluster in standalone mode, you need to set the `scalar.db.cluster.node.standalone_mode.enabled` property to `true`: + +```properties +scalar.db.cluster.node.standalone_mode.enabled=true +``` + +## Usage with Docker + +You can start ScalarDB Cluster in standalone mode by running the following command, replacing `` with the path to the ScalarDB Cluster properties file and `` with the version of ScalarDB Cluster that you are using: + +```console +$ docker run -v :/scalardb-cluster/node/scalardb-cluster-node.properties -d -p 60053:60053 -p 8080:8080 ghcr.io/scalar-labs/scalardb-cluster-node: +``` + +In this case, you must include the property `scalar.db.cluster.node.standalone_mode.enabled` and set it to `true` in the properties file. + +You can also start ScalarDB Cluster by using environment variables as follows by setting the variable `SCALAR_DB_CLUSTER_NODE_STANDALONE_MODE_ENABLED` to `true`. + +```console +$ docker run \ +--env SCALAR_DB_CLUSTER_NODE_STANDALONE_MODE_ENABLED=true \ +--env SCALAR_DB_STORAGE=cassandra \ +--env SCALAR_DB_CONTACT_POINTS=localhost \ +--env SCALAR_DB_CONTACT_PORT=9042 \ +--env SCALAR_DB_USERNAME=cassandra \ +--env SCALAR_DB_PASSWORD=cassandra \ +--env SCALAR_DB_GRAPHQL_ENABLED=true \ +--env SCALAR_DB_SQL_ENABLED=true \ +-d -p 60053:60053 -p 8080:8080 ghcr.io/scalar-labs/scalardb-cluster-node: +``` + +The following environment variables are available for the Docker image: + +| Name | Corresponding configuration | +|-------------------------------------------------------------------|-------------------------------------------------------------------| +| `SCALAR_DB_CLUSTER_MEMBERSHIP_TYPE` | `scalar.db.cluster.membership.type` | +| `SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAMESPACE_NAME` | `scalar.db.cluster.membership.kubernetes.endpoint.namespace_name` | +| `SCALAR_DB_CLUSTER_MEMBERSHIP_KUBERNETES_ENDPOINT_NAME` | `scalar.db.cluster.membership.kubernetes.endpoint.name` | +| `SCALAR_DB_CLUSTER_GRPC_DEADLINE_DURATION_MILLIS` | `scalar.db.cluster.grpc.deadline_duration_millis` | +| `SCALAR_DB_CLUSTER_NODE_STANDALONE_MODE_ENABLED` | `scalar.db.cluster.node.standalone_mode.enabled` | +| `SCALAR_DB_STORAGE` | `scalar.db.storage` | +| `SCALAR_DB_CONTACT_POINTS` | `scalar.db.contact_points` | +| `SCALAR_DB_CONTACT_PORT` | `scalar.db.contact_port` | +| `SCALAR_DB_USERNAME` | `scalar.db.username` | +| `SCALAR_DB_PASSWORD` | `scalar.db.password` | +| `SCALAR_DB_CROSS_PARTITION_SCAN_ENABLED` | `scalar.db.cross_partition_scan.enabled` | +| `SCALAR_DB_CROSS_PARTITION_SCAN_FILTERING_ENABLED` | `scalar.db.cross_partition_scan.filtering.enabled` | +| `SCALAR_DB_CROSS_PARTITION_SCAN_ORDERING_ENABLED` | `scalar.db.cross_partition_scan.ordering.enabled` | +| `SCALAR_DB_CONSENSUS_COMMIT_ISOLATION_LEVEL` | `scalar.db.consensus_commit.isolation_level` | +| `SCALAR_DB_CONSENSUS_COMMIT_SERIALIZABLE_STRATEGY` | `scalar.db.consensus_commit.serializable_strategy` | +| `SCALAR_DB_CONSENSUS_COMMIT_PARALLEL_EXECUTOR_COUNT` | `scalar.db.consensus_commit.parallel_executor_count` | +| `SCALAR_DB_CONSENSUS_COMMIT_PARALLEL_PREPARATION_ENABLED` | `scalar.db.consensus_commit.parallel_preparation.enabled` | +| `SCALAR_DB_CONSENSUS_COMMIT_PARALLEL_COMMIT_ENABLED` | `scalar.db.consensus_commit.parallel_commit.enabled` | +| `SCALAR_DB_CONSENSUS_COMMIT_ASYNC_COMMIT_ENABLED` | `scalar.db.consensus_commit.async_commit.enabled` | +| `SCALAR_DB_GRAPHQL_ENABLED` | `scalar.db.graphql.enabled` | +| `SCALAR_DB_GRAPHQL_PORT` | `scalar.db.graphql.port` | +| `SCALAR_DB_GRAPHQL_PATH` | 
`scalar.db.graphql.path` | +| `SCALAR_DB_GRAPHQL_NAMESPACES` | `scalar.db.graphql.namespaces` | +| `SCALAR_DB_GRAPHQL_GRAPHIQL` | `scalar.db.graphql.graphiql` | +| `SCALAR_DB_GRAPHQL_SCHEMA_CHECKING_INTERVAL_MILLIS` | `scalar.db.graphql.schema_checking_interval_millis` | +| `SCALAR_DB_SQL_ENABLED` | `scalar.db.sql.enabled` | +| `SCALAR_DB_SQL_STATEMENT_CACHE_ENABLED` | `scalar.db.sql.statement_cache.enabled` | +| `SCALAR_DB_SQL_STATEMENT_CACHE_SIZE` | `scalar.db.sql.statement_cache.size` | +| `SCALAR_DB_CLUSTER_AUTH_ENABLED` | `scalar.db.cluster.auth.enabled` | +| `SCALAR_DB_CLUSTER_AUTH_CACHE_EXPIRATION_TIME_MILLIS` | `scalar.db.cluster.auth.cache_expiration_time_millis` | +| `SCALAR_DB_CLUSTER_AUTH_AUTH_TOKEN_EXPIRATION_TIME_MINUTES` | `scalar.db.cluster.auth.auth_token_expiration_time_minutes` | +| `SCALAR_DB_CLUSTER_AUTH_AUTH_TOKEN_GC_THREAD_INTERVAL_MINUTES` | `scalar.db.cluster.auth.auth_token_gc_thread_interval_minutes` | +| `SCALAR_DB_CLUSTER_TLS_ENABLED` | `scalar.db.cluster.tls.enabled` | +| `SCALAR_DB_CLUSTER_TLS_CA_ROOT_CERT_PATH` | `scalar.db.cluster.tls.ca_root_cert_path` | +| `SCALAR_DB_CLUSTER_TLS_OVERRIDE_AUTHORITY` | `scalar.db.cluster.tls.override_authority` | +| `SCALAR_DB_CLUSTER_NODE_TLS_CERT_CHAIN_PATH` | `scalar.db.cluster.node.tls.cert_chain_path` | +| `SCALAR_DB_CLUSTER_NODE_TLS_PRIVATE_KEY_PATH` | `scalar.db.cluster.node.tls.private_key_path` | + +If you want to specify configurations other than the ones mentioned in the table above, set up your custom properties file by using the configurations described in [ScalarDB Cluster Configurations](scalardb-cluster-configurations.md). + +## Usage with Docker Compose + +You can start ScalarDB Cluster in standalone mode on Docker Compose by using the `docs/standalone-mode/docker-compose.yaml` file. This file includes PostgreSQL as the backend database for ScalarDB Cluster. + +To start ScalarDB Cluster in standalone mode on Docker Compose, first go to the folder that contains the `docker-compose.yaml` file by running the following command: + +```console +cd docs/standalone-mode/ +``` + +Then, start Docker Compose by running the following command: + +{% capture notice--info %} +**Note** + +To change the configuration of ScalarDB Cluster, update the `docs/standalone-mode/scalardb-cluster-node.properties` file before running the command below. +{% endcapture %} + +
{{ notice--info | markdownify }}
+ +```console +docker compose up -d +``` + +## Client configurations for the ScalarDB Cluster Java API + +You can use the `indirect` client mode to connect to ScalarDB Cluster in standalone mode. For details about client configurations for the ScalarDB Cluster Java API, see [Developer Guide for ScalarDB Cluster with the Java API](developer-guide-for-scalardb-cluster-with-java-api.md). diff --git a/docs/3.12/scalardb-cluster/standalone-mode/docker-compose.yaml b/docs/3.12/scalardb-cluster/standalone-mode/docker-compose.yaml new file mode 100644 index 00000000..5417be60 --- /dev/null +++ b/docs/3.12/scalardb-cluster/standalone-mode/docker-compose.yaml @@ -0,0 +1,28 @@ +version: "3.5" +services: + + postgresql: + container_name: "postgresql" + image: "postgres:15" + ports: + - 5432:5432 + environment: + - POSTGRES_PASSWORD=postgres + healthcheck: + test: ["CMD-SHELL", "pg_isready || exit 1"] + interval: 1s + timeout: 10s + retries: 60 + start_period: 30s + + scalardb-cluster-standalone: + container_name: "scalardb-cluser-node" + image: "ghcr.io/scalar-labs/scalardb-cluster-node:3.12.0" + ports: + - 60053:60053 + - 8080:8080 + volumes: + - ./scalardb-cluster-node.properties:/scalardb-cluster/node/scalardb-cluster-node.properties + depends_on: + postgresql: + condition: service_healthy diff --git a/docs/3.12/scalardb-cluster/standalone-mode/scalardb-cluster-node.properties b/docs/3.12/scalardb-cluster/standalone-mode/scalardb-cluster-node.properties new file mode 100644 index 00000000..6ec8f9a0 --- /dev/null +++ b/docs/3.12/scalardb-cluster/standalone-mode/scalardb-cluster-node.properties @@ -0,0 +1,5 @@ +scalar.db.storage=jdbc +scalar.db.contact_points=jdbc:postgresql://postgresql:5432/postgres +scalar.db.username=postgres +scalar.db.password=postgres +scalar.db.cluster.node.standalone_mode.enabled=true diff --git a/docs/3.12/scalardb-graphql/aws-deployment-guide.md b/docs/3.12/scalardb-graphql/aws-deployment-guide.md new file mode 100644 index 00000000..2df11697 --- /dev/null +++ b/docs/3.12/scalardb-graphql/aws-deployment-guide.md @@ -0,0 +1,298 @@ +# Deployment Guide on AWS + +This document explains how to deploy ScalarDB GraphQL servers on the Amazon EKS environment. + +We will deploy multiple GraphQL servers to an EKS cluster with an [AWS Load Balancer Controller](https://docs.aws.amazon.com/eks/latest/userguide/aws-load-balancer-controller.html), which manages an internal AWS Application Load Balancer. The AWS Application Load Balancer handles HTTP cookie-based session affinity. When we deploy multiple GraphQL servers, session affinity is required to handle transactions properly. This is because GraphQL servers keep the transactions in memory, so GraphQL queries that use continued transactions must be routed to the same server that started the transaction. + +## What we create + +In this guide, we will create the following components. + +* A VPC with NAT gateway +* An EKS cluster with a Kubernetes node group +* A managed database service +* A bastion instance with a public IP +* An internal ALB for load balancing HTTP requests using the [AWS Load Balancer Controller](https://docs.aws.amazon.com/eks/latest/userguide/aws-load-balancer-controller.html). + +## Step 1. Configure an EKS cluster + +Please follow steps **1** to **3** in the [Deploy Scalar DL on AWS](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/ManualDeploymentGuideScalarDLOnAWS.md) guide to set up a VPC, subnets, a bastion host, a database, an EKS cluster, and a node group. 
+ +**Note:** That guide instructs to add a label to the node group with a key `agentpool` and a value `scalardlpool`. However, since we will deploy GraphQL servers, not Scalar DL, we should change the label. For example, the node group can be labeled with a key `agentpool` and a value `scalardbgraphqlpool`. The label can be used to select nodes when deploying ScalarDB GraphQL servers. + +In this document, a placeholder cluster name `` will be used in command examples. Please change it to your cluster name. + +## Step 2. Load Database Schema + +We need to load a database schema to the database before starting ScalarDB GraphQL. + +Here is an example for DynamoDB. For other databases and more detailed instructions, please refer to the [ScalarDB Schema Loader](https://github.com/scalar-labs/scalardb/blob/master/docs/schema-loader.md). + +1. Create a `database.properties` configuration file for DynamoDB + + ```properties + scalar.db.contact_points= + scalar.db.username= + scalar.db.password= + scalar.db.storage=dynamo + ``` + +2. Create a `schema.json` file. Please refer to the sample in the [ScalarDB Schema Loader](https://github.com/scalar-labs/scalardb/blob/master/schema-loader) document. + +3. Load the schema to the database with ScalarDB Schema Loader + + ```console + docker run --rm -v $PWD/schema.json:/schema.json \ + -v $PWD/database.properties:/database.properties \ + ghcr.io/scalar-labs/scalardb-schema-loader:3.5.2 \ + -c /database.properties -f /schema.json --coordinator + ``` + +## Step 3. Install eksctl + +Log in to the bastion host and install the `eksctl` command by following the official documents. + +* [Introduction - eksctl](https://eksctl.io/introduction/#installation) +* [Installing eksctl - Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html) + +We also need to install AWS CLI and configure AWS API credentials. + +* [AWS Command Line Interface](https://aws.amazon.com/cli/) + +## Step 4. Install the AWS Load Balancer Controller add-on to EKS + +The AWS Load Balancer Controller is a controller to manage AWS Elastic Load Balancers for a Kubernetes cluster. + +We use it to create an AWS Application Load Balancer (ALB) in front of multiple GraphQL servers in the cluster. When we create a Kubernetes **Ingress** resource in the cluster, the controller provisions an ALB according to it. + +In this section, we will install the controller according to the official installation documents below. + +* [Installing the AWS Load Balancer Controller add-on - Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/aws-load-balancer-controller.html) +* [Installation Guide - AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/deploy/installation/) + +### Set up IAM Permissions + +We need to set up IAM permissions before installing the AWS Load Balancer Controller so that the controller running on the worker nodes can manage ALBs on AWS. + +1. Create IAM OpenID Connect (OIDC) provider + + ```console + eksctl utils associate-iam-oidc-provider --cluster --approve + ``` + + In the AWS Management Console, we can see an IAM Identity provider has been created. + + ![oidc-provider](images/oidc-provider.png) + +2. Download the IAM policy document for the AWS Load Balancer Controller + + ```console + curl -o iam-policy.json https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.4.1/docs/install/iam_policy.json + ``` + +3. 
Create an IAM policy named `AWSLoadBalancerControllerIAMPolicy` + + ```console + aws iam create-policy \ + --policy-name AWSLoadBalancerControllerIAMPolicy \ + --policy-document file://iam-policy.json + ``` + + Please take note of the policy ARN created with the command, as we will specify it in the next step. + + In the AWS Management Console, we can see an IAM Policy has been created. + + ![lb-controller-iam-policy](images/lb-controller-iam-policy.png) + +4. Create an IAM role and ServiceAccount for the AWS Load Balancer controller + + ```console + eksctl create iamserviceaccount \ + --cluster= \ + --namespace=kube-system \ + --name=aws-load-balancer-controller \ + --attach-policy-arn=arn:aws:iam:::policy/AWSLoadBalancerControllerIAMPolicy \ + --override-existing-serviceaccounts \ + --approve + ``` + + Please specify the policy ARN from the previous step as the `--attach-policy-arn` option. + + To check if the IAM service account has been created, run: + + ```console + $ eksctl get iamserviceaccount --cluster= + NAMESPACE NAME ROLE ARN + kube-system aws-load-balancer-controller arn:aws:iam::***:role/****** + ``` + + In the AWS Management Console, we can see an IAM Role has been created and the policy is attached to it. + + ![iam-role-for-serviceaccount](images/iam-role-for-serviceaccount.png) + +### Add AWS Load Balancer Controller to Cluster + +Now we can install the AWS Load Balancer Controller with Helm. + +1. Add the EKS chart repo to helm + + ```console + helm repo add eks https://aws.github.io/eks-charts + ``` + +2. Install the helm chart + + We need to specify both of the chart values `serviceAccount.create=false` and `serviceAccount.name=aws-load-balancer-controller` since we have already created a service account named `aws-load-balancer-controller` in the previous section. + + ```console + helm install aws-load-balancer-controller \ + eks/aws-load-balancer-controller -n kube-system \ + --set clusterName= \ + --set serviceAccount.create=false \ + --set serviceAccount.name=aws-load-balancer-controller + ``` + + **Note:** If upgrading the chart using `helm upgrade` instead of `helm install`, the TargetGroupBinding CRDs must be installed manually before running the previous command. + + ```console + kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master" + ``` + +3. Verify that the controller is installed + + ```console + $ kubectl get deployment -n kube-system aws-load-balancer-controller + NAME READY UP-TO-DATE AVAILABLE AGE + aws-load-balancer-controller 2/2 2 2 11s + ``` + +## Step 5. Install ScalarDB GraphQL + +1. Create a secret to get the image from ghcr.io. + + ```console + kubectl create secret docker-registry reg-docker-secrets \ + --docker-server=ghcr.io \ + --docker-username= \ + --docker-password= + ``` + + The GitHub Personal Access Token (PAT) specified here must have access with the `read:packages` scope. + +2. Create a custom values file. + + Download the `scalardb-graphql-custom-values.yaml` from the [scalar-kubernetes](https://github.com/scalar-labs/scalar-kubernetes/tree/master/conf) repository. Note that the file is going to be versioned in the future, so you might want to change the branch to use a proper version. + + Please update the database configuration in the `scalarDbGraphQlConfiguration` key according to your needs. The `namespaces` should be the namespace you have loaded to the database. + + Under the `ingress` key, the `className` must be `alb` so that the Ingress object can control the ALB. 
The `annotations` specifies the behavior of the Ingress object. Especially, `alb.ingress.kubernetes.io/target-group-attributes` specifies the attributes of the [Target Groups](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html). `stickiness.enabled=true` is required here because we need cookie-based sticky sessions. (This setting requires `alb.ingress.kubernetes.io/target-type` to be set to `ip`). For more details about annotations, please refer to [Ingress annotations](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/guide/ingress/annotations/). + + The `affinity` key specifies node affinity. With this example settings, the pods are scheduled based on node labels. The pods will be deployed to the nodes labeled with the key `agentpool` and the value `scalardbgraphqlpool`. + + The `tolerations` key is applied to the pods. With this example, you should add taints `kubernetes.io/app=scalardbgraphqlpool:NoSchedule` to each worker node where you do not want to deploy other pods than the ScalarDB GraphQL pod (e.g. application pods). In other words, the tainted nodes will be dedicated nodes for ScalarDB GraphQL. + +3. Add a tag to the subnets + + As shown in the custom values file above, we are creating an internal ALB by setting the annotation `alb.ingress.kubernetes.io/scheme` to `internal` (default). The internal ALB will discover subnets by the tag `kubernetes.io/role/internal-elb`. For more information on the Subnet Discovery, please refer to the documentation of the AWS Load Balancer Controller: [Subnet Discovery - AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/deploy/subnet_discovery/) + + Add the tag to the private subnets using the AWS Management Console or by running the following `aws` commmand. + + ```console + aws ec2 create-tags --resources ... --tags "Key=kubernetes.io/role/internal-elb,Value=1" + ``` + +4. Install the Helm chart + + ```console + # Add Helm charts + helm repo add scalar-labs https://scalar-labs.github.io/helm-charts + + # Install ScalarDB GraphQL with a release name `my-release-scalardb-graphql` + helm upgrade --install my-release-scalardb-graphql \ + scalar-labs/scalardb-graphql --namespace default \ + -f scalardb-graphql-custom-values.yaml + ``` + +5. Verify that an internal ALB has been created + + ```console + $ kubectl get ingress + NAME CLASS HOSTS ADDRESS PORTS AGE + my-release-scalardb-graphql alb * internal-k8s-default-myreleas-551ab46922-2136573255.us-west-1.elb.amazonaws.com 80 9m36s + ``` + + The ALB DNS name is output in the ADDRESS column. + +6. Test the ALB is working + + The following `curl` command sends a simple GraphQL request to the ALB. The ALB should return a `200` response. + + ```console + $ curl 'http:///graphql -v -H 'Content-Type: application/graphql' --data '{__typename}' + ... + > POST /graphql HTTP/1.1 + ... + < HTTP/1.1 200 OK + ... + < Set-Cookie: AWSALB=...; Expires=Tue, 24 May 2022 07:24:06 GMT; Path=/ + < Set-Cookie: AWSALBCORS=...; Expires=Tue, 24 May 2022 07:24:06 GMT; Path=/; SameSite=None + ... + {"data":{"__typename":"Query"}} + ``` + + **Note on cookies**: In this example, the ALB returns `Set-Cookie` response headers since the sticky sessions attributes are specified for the ALB Target Groups in the custom values file. By handling cookies, you can route the subsequent requests to the same target behind the ALB. 
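+
+If you test continued transactions by hand, remember that the client must send the affinity cookie back on subsequent requests. For example, with `curl` you can store and resend the cookies by using a cookie jar file. This is a sketch; `<alb-dns-name>` is a placeholder for the ALB DNS name shown above:
+
+```console
+# First request: save the ALB affinity cookies (AWSALB / AWSALBCORS) to a local cookie jar
+$ curl -c cookies.txt -H 'Content-Type: application/graphql' --data '{__typename}' 'http://<alb-dns-name>/graphql'
+
+# Subsequent requests (for example, ones that continue a transaction) send the saved cookies back
+# so that the ALB routes them to the same GraphQL server
+$ curl -b cookies.txt -c cookies.txt -H 'Content-Type: application/graphql' --data '{__typename}' 'http://<alb-dns-name>/graphql'
+```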
+ +## Clean up the resources + +When you need to remove the resources you have created, uninstall/delete the resources in the following order. + +* ScalarDB GraphQL +* IAM service account +* Load Balancer IAM Policy +* IAM OpenID Connect (OIDC) provider +* Other resources + +### Uninstall ScalarDB GraphQL + +The following command uninstalls ScalarDB GraphQL with a release name `my-release-scalardb-graphql`. This command will also remove the ALB since the Ingress resource manages it. + +```console +helm uninstall my-release-scalardb-graphql --namespace default +``` + +### Delete IAM Service Account + +Delete the IAM policy that was created by the `eksctl create iamserviceaccount` command. + +```console +eksctl delete iamserviceaccount --cluster= --namespace=kube-system --name=aws-load-balancer-controller +``` + +### Delete Load Balancer IAM Policy + +Delete the IAM policy created by the `aws iam create-policy` command. + +```console +aws iam delete-policy --policy-arn arn:aws:iam:::policy/AWSLoadBalancerControllerIAMPolicy +``` + +### Delete IAM OpenID Connect (OIDC) provider + +Delete the IAM OpenID Connect (OIDC) provider created by the `eksctl utils associate-iam-oidc-provider`. + +If you delete the cluster with an `eksctl delete cluster` command, the provider will be deleted automatically. But if you are deleting the cluster in a different way, or if you need to delete the provider without deleting the cluster, use the following commands: + +```console +# List the providers +aws iam list-open-id-connect-providers + +# Get the provider URL associated with the cluster +aws eks describe-cluster --name --output json | jq -r .cluster.identity.oidc.issuer + +# Find the ARN matching the URL from the list, and delete it +aws iam delete-open-id-connect-provider --open-id-connect-provider-arn +``` + +### Delete other resources + +Other resources created for deployment, such as node groups, EKS clusters, databases, VPCs, should be deleted as needed. diff --git a/docs/3.12/scalardb-graphql/getting-started-with-scalardb-graphql.md b/docs/3.12/scalardb-graphql/getting-started-with-scalardb-graphql.md new file mode 100644 index 00000000..461ccff0 --- /dev/null +++ b/docs/3.12/scalardb-graphql/getting-started-with-scalardb-graphql.md @@ -0,0 +1,235 @@ +# Getting Started with ScalarDB GraphQL + +ScalarDB GraphQL is an interface layer that allows client applications to communicate with a [ScalarDB](https://github.com/scalar-labs/scalardb) database with GraphQL. + +In this Getting Started guide, you will run a GraphQL server on your local machine. + +## Prerequisites + +We assume you have already installed Docker and have access to a ScalarDB-supported database such as Cassandra. Please configure them first by following [Getting Started with ScalarDB](https://github.com/scalar-labs/scalardb/blob/master/docs/getting-started-with-scalardb.md) if you have not set them up yet. + +You need a Personal Access Token (PAT) to access the Docker image of ScalarDB GraphQL in GitHub Container registry since the image is private. Ask a person in charge to get your account ready. Please read [the official document](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry) for more detail. + +## Set up a database schema + +We use the following simple example schema. 
+
+`emoney.json`
+
+```json
+{
+  "emoney.account": {
+    "transaction": true,
+    "partition-key": [
+      "id"
+    ],
+    "clustering-key": [],
+    "columns": {
+      "id": "TEXT",
+      "balance": "INT"
+    }
+  }
+}
+```
+
+To apply the schema to your database, download the Schema Loader that matches the version you use from [scalardb releases](https://github.com/scalar-labs/scalardb/releases), and run the following command to load the schema.
+
+```console
+$ java -jar scalardb-schema-loader-.jar --config /path/to/database.properties -f emoney.json --coordinator
+```
+
+## Docker login
+
+`docker login` is required to start the ScalarDB GraphQL Docker image as described in the Prerequisites section.
+
+```console
+# read:packages scope needs to be selected in a personal access token to login
+$ export CR_PAT=YOUR_PERSONAL_ACCESS_TOKEN
+$ echo $CR_PAT | docker login ghcr.io -u USERNAME --password-stdin
+```
+
+## Configure the GraphQL Server
+
+Add the following properties to your `database.properties` file. You can change the values as needed. In particular, make sure to set the `namespaces` property. The tables in the specified namespaces (the value can be a comma-separated list) will be exposed by the GraphQL server.
+
+```properties
+scalar.db.graphql.port=8080
+scalar.db.graphql.path=/graphql
+scalar.db.graphql.namespaces=emoney
+scalar.db.graphql.graphiql=true
+```
+
+## Start up the GraphQL Server
+
+The following command starts up the ScalarDB GraphQL server. The first time you run the command, it will download the Docker image from GitHub Container Registry.
+
+```console
+$ docker run -v /path/to/database.properties:/scalardb-graphql/database.properties.tmpl -p 8080:8080 ghcr.io/scalar-labs/scalardb-graphql:
+```
+
+At this point, the server reads the tables in the specified namespaces and generates a GraphQL schema to perform CRUD operations on them.
+
+## Run operations from GraphiQL
+
+If the server is configured with the property `scalar.db.graphql.graphiql=true` (true by default), the GraphiQL IDE will be available. When the above example properties are used, the endpoint URL of the GraphiQL IDE is `http://localhost:8080/graphql`. Opening that URL with your web browser will take you to the GraphiQL screen.
+
+Let's insert the first record. In the left pane, paste the following mutation, then push the triangle-shaped `Execute Query` button at the top of the window.
+
+```graphql
+mutation PutUser1 {
+  account_put(put: {key: {id: "user1"}, values: {balance: 1000}})
+}
+```
+
+ScalarDB GraphQL always runs queries with transactions. The above query starts a new transaction, executes a ScalarDB Put command, and commits the transaction at the end of the execution.
+
+The following response from the GraphQL server will appear in the right pane.
+
+```json
+{
+  "data": {
+    "account_put": true
+  }
+}
+```
+
+The `"data"` field contains the result of the execution. This response shows that the `account_put` field of the mutation was successful. The result type of mutations is `Boolean!`, which indicates whether the operation succeeded or not.
+
+Next, let's get the record you just inserted. Paste the following query next to the previous mutation in the left pane, and click the `Execute Query` button. Since you haven't deleted the `PutUser1` mutation above, a pull-down will appear below the button so that you can choose which operation to execute. Choose `GetUser1`.
+ +```graphql +query GetUser1 { + account_get(get: {key: {id: "user1"}}) { + account { + id + balance + } + } +} +``` + +You should get the following result in the right pane. + +```json +{ + "data": { + "account_get": { + "account": { + "id": "user1", + "balance": 1000 + } + } + } +} +``` + +## Mappings between GraphQL API and ScalarDB Java API + +The automatically generated GraphQL schema defines queries, mutations, and object types for input/output to allow you to run CRUD operations for all the tables in the specified namespaces. These operations are designed to match the ScalarDB APIs defined in the [`DistributedTransaction`](https://javadoc.io/doc/com.scalar-labs/scalardb/latest/com/scalar/db/api/DistributedTransaction.html) interface. + +Assuming you have an `account` table in a namespace, the following queries and mutations will be generated. + +| ScalarDB API | GraphQL root type | GraphQL field | +|--------------------------------------------------------|-------------------|------------------------------------------------------------------------------------| +| `get(Get get)` | `Query` | `account_get(get: account_GetInput!): account_GetPayload` | +| `scan(Scan scan)` | `Query` | `account_scan(scan: account_ScanInput!): account_ScanPayload` | +| `put(Put put)` | `Mutation` | `account_put(put: account_PutInput!): Boolean!` | +| `put(java.util.List puts)` | `Mutation` | `account_bulkPut(put: [account_PutInput!]!): Boolean!` | +| `delete(Delete delete)` | `Mutation` | `account_delete(delete: account_DeleteInput!): Boolean!` | +| `delete(java.util.List deletes)` | `Mutation` | `account_bulkDelete(delete: [account_DeleteInput!]!): Boolean!` | +| `mutate(java.util.List mutations)` | `Mutation` | `account_mutate(put: [account_PutInput!]delete: [account_DeleteInput!]): Boolean!` | + +Note that the `scan` field is not generated for a table with no clustering key. This is the reason why `account_scan` field is not available in our `emoney` example in this document. + +You can see all generated GraphQL types in GraphiQL's Documentation Explorer (the `< Docs` link at the top-right corner). + +## Transaction across multiple requests + +This section describes how to run a transaction that spans multiple GraphQL requests. + +The generated schema provides the `@transaction` directive that allows you to identify transactions. This directive can be used with both queries and mutations. + +### Start a transaction before running an operation + +Adding a `@transaction` directive with no arguments to a query or a mutation directs the execution to start a new transaction. + +```graphql +query GetAccounts @transaction { + user1: account_get(get: {key: {id: "user1"}}) { + account { balance } + } + user2: account_get(get: {key: {id: "user2"}}) { + account { balance } + } +} +``` + +You will get a result with a transaction ID in the `extensions` field. The `id` value in the extensions is the transaction ID in which the operation in the request was run. In this case, this is the new ID of the transaction just started by the request. + +```json +{ + "data": { + "user1": { + "account": { + "balance": 1000 + } + }, + "user2": { + "account": { + "balance": 1000 + } + } + }, + "extensions": { + "transaction": { + "id": "c88da8a6-a13f-4857-82fe-45f1ab4150f9" + } + } +} +``` + +### Run an operation in a continued transaction + +To run the next queries or mutations in the transaction you started, specify the transaction ID as the `id` argument of the `@transaction`. 
The following example will update two accounts you got in the previous example in the same transaction. This represents a transfer of balance from user1's account to user2's account. + +```graphql +mutation Transfer @transaction(id: "c88da8a6-a13f-4857-82fe-45f1ab4150f9") { + user1: account_put(put: {key: {id: "user1"}, values: {balance: 750}}) + user2: account_put(put: {key: {id: "user2"}, values: {balance: 1250}}) +} +``` + +Note that a transaction started with GraphQL has a timeout of 1 minute and will be aborted automatically when it exceeds the timeout. + +### Commit a transaction + +To commit the continued transaction, specify both the `id` and the `commit: true` flag as arguments of the `@transaction` directive. + +```graphql +query GetAndCommit @transaction(id: "c88da8a6-a13f-4857-82fe-45f1ab4150f9", commit: true) { + user1: account_get(get: {key: {id: "user1"}}) { + account { balance } + } + user2: account_get(get: {key: {id: "user2"}}) { + account { balance } + } +} +``` + +Note: If you specify a `commit: true` flag without an `id` argument like `@transaction(commit: true)`, a new transaction is started and committed just for one operation. This is exactly the same as not specifying the `@transaction` directive, as seen in the above examples using GraphiQL. In other words, you can omit the directive itself when it is `@transaction(commit: true)`. + +### Abort/Rollback a transaction + +When you need to abort/rollback a transaction explicitly, you can use the `abort` or `rollback` mutation fields interchangeably (both have the same effect and usage). Note that you cannot mix it with any other operations, so you must specify it alone. + +```graphql +mutation AbortTx @transaction(id: "c88da8a6-a13f-4857-82fe-45f1ab4150f9") { + abort +} +``` +or +```graphql +mutation RollbackTx @transaction(id: "c88da8a6-a13f-4857-82fe-45f1ab4150f9") { + rollback +} +``` \ No newline at end of file diff --git a/docs/3.12/scalardb-graphql/how-to-run-server.md b/docs/3.12/scalardb-graphql/how-to-run-server.md new file mode 100644 index 00000000..87ebe76d --- /dev/null +++ b/docs/3.12/scalardb-graphql/how-to-run-server.md @@ -0,0 +1,71 @@ +# How to Run ScalarDB GraphQL Server + +ScalarDB GraphQL Server is an interface layer that allows client applications to communicate with [ScalarDB](https://github.com/scalar-labs/scalardb) with GraphQL. + +## Configure + +In addition to the properties in the [ScalarDB database.properties](https://github.com/scalar-labs/scalardb/blob/master/conf/database.properties) file, the GraphQL server reads the following: + +* `scalar.db.graphql.port` ... Port number for the GraphQL server. The default is `8080`. +* `scalar.db.graphql.path` ... Path component of the URL of the GraphQL endpoint. The default is `/graphql`. +* `scalar.db.graphql.namespaces` ... Comma-separated list of namespaces of tables for which the GraphQL server generates a schema. Note that at least one namespace is required. +* `scalar.db.graphql.graphiql` ... Whether the GraphQL server serves [GraphiQL](https://github.com/graphql/graphiql) IDE. The default is `true`. +* `scalar.db.graphql.schema_checking_interval_millis` ... The interval at which the GraphQL server will rebuild the GraphQL schema if any change is detected in the ScalarDB schema. + The default interval value is `30000` (30 seconds). Besides, this automatic schema rebuild can be disabled by setting the property value to `-1`. 
  Refer to the [following section](#creating-or-modifying-the-scalardb-schema-when-the-server-is-running) for more details.

## Run

We provide a Docker container that contains ScalarDB GraphQL Server (available only to users with an enterprise license).
With the container, you can run ScalarDB GraphQL Server as follows (specify the path to your local configuration file with the `-v` flag):

```console
docker run -d -p 8080:8080 \
  -v <your_local_path_to_database.properties>:/scalardb-graphql/database.properties \
  ghcr.io/scalar-labs/scalardb-graphql:<version>

# For DEBUG logging
docker run -d -p 8080:8080 \
  -v <your_local_path_to_database.properties>:/scalardb-graphql/database.properties \
  -e SCALAR_DB_GRAPHQL_LOG_LEVEL=DEBUG \
  ghcr.io/scalar-labs/scalardb-graphql:<version>
```

You can also pass the database settings via environment variables:

```console
docker run -d -p 8080:8080 \
  -e SCALAR_DB_CONTACT_POINTS=cassandra \
  -e SCALAR_DB_CONTACT_PORT=9042 \
  -e SCALAR_DB_USERNAME=cassandra \
  -e SCALAR_DB_PASSWORD=cassandra \
  -e SCALAR_DB_STORAGE=cassandra \
  -e SCALAR_DB_TRANSACTION_MANAGER=consensus-commit \
  -e SCALAR_DB_GRAPHQL_PATH=/graphql \
  -e SCALAR_DB_GRAPHQL_NAMESPACES=namespace1,namespace2 \
  -e SCALAR_DB_GRAPHQL_GRAPHIQL=true \
  -e SCALAR_DB_GRAPHQL_LOG_LEVEL=INFO \
  ghcr.io/scalar-labs/scalardb-graphql:<version>
```

### Creating or modifying the ScalarDB schema when the server is running

Since the GraphQL schema is statically built at server startup, if the ScalarDB schema is modified (e.g., a table is added, altered, or deleted), the corresponding GraphQL schema won't reflect the changes unless it is rebuilt. To address this, the GraphQL server provides the following two mechanisms:

#### Periodic check

The server periodically checks whether the ScalarDB schema has changed and rebuilds the corresponding GraphQL schema if necessary. By default, the check occurs every 30 seconds, but the interval can be configured with the `scalar.db.graphql.schema_checking_interval_millis` property.
This periodic check can be disabled by setting the property value to `-1`.

#### On-demand check

You can also request the server to check for changes in the ScalarDB schema and rebuild the corresponding GraphQL schema if necessary by sending a POST request to the `/update-graphql-schema` endpoint of the HTTP API.

For example, if the HTTP API is running on `localhost` on port `8080` and the `scalar.db.graphql.path` property is set to `/graphql`, this endpoint can be called as follows:

```console
curl -X POST http://localhost:8080/graphql/update-graphql-schema
```
\ No newline at end of file
diff --git a/docs/3.12/scalardb-graphql/how-to-run-two-phase-commit-transaction.md b/docs/3.12/scalardb-graphql/how-to-run-two-phase-commit-transaction.md
new file mode 100644
index 00000000..b00aef25
--- /dev/null
+++ b/docs/3.12/scalardb-graphql/how-to-run-two-phase-commit-transaction.md
@@ -0,0 +1,151 @@
# How to run two-phase commit transaction

ScalarDB GraphQL supports two-phase commit style transactions called [Two-phase Commit Transactions](https://github.com/scalar-labs/scalardb/blob/master/docs/two-phase-commit-transactions.md).
With Two-phase Commit Transactions, you can execute a transaction that spans multiple processes or applications (e.g., microservices).
We name the application that starts a transaction the "coordinator", while the applications that join the transaction are named "participants".
Every two-phase commit operation requires annotating the mutation or query operation with a `@twoPhaseCommit` directive.
Below is a description of these operations.

## Start a transaction

To start a transaction, add the `@twoPhaseCommit` directive without setting parameters.

```graphql
query some_query @twoPhaseCommit {
  # some query
}
```

The transaction ID of the started transaction will be returned in the extensions object that is part of the result.

```json
{
  "data": {
    ...
  },
  "extensions": {
    "transaction": {
      "id": "the_transaction_id"
    }
  }
}
```

## Join a transaction (for participants)

In a participant application, to join the transaction started by a coordinator application, set the transaction ID with the `id` parameter and set the `join` parameter to `true`.

```graphql
query some_query_from_participant @twoPhaseCommit(id:"the_transaction_id", join:true) {
  # some query
}
```

## Resume a transaction

To continue executing operations in the started or joined transaction, set the transaction ID value in the `id` parameter of the `@twoPhaseCommit` directive.

```graphql
mutation some_mutation @twoPhaseCommit(id:"the_transaction_id") {
  # some mutation
}
```

## Prepare, validate and commit a transaction

After finishing the query and mutation operations, you need to commit the transaction. As in the well-known two-phase commit protocol, there are two phases: prepare and commit.
You first need to prepare the transaction in all the coordinator/participant applications, and then you need to commit the transaction in all the coordinator/participant applications.

If the Consensus Commit transaction manager is configured with the `EXTRA_READ` serializable strategy in `SERIALIZABLE` isolation level, an extra "validate" phase is required between the prepare and commit phases.
Similarly to prepare and commit, validation needs to be executed in all the coordinator/participant applications.

The prepare, validate, and commit phases can each be executed in parallel across all the coordinator and participant applications.

### Prepare a transaction

There are two ways to prepare a two-phase commit transaction.

#### Via the directive parameter

By using the `prepare` parameter of the directive, the transaction will be prepared after the execution of the operation fields, and only if they do not raise an error.

```graphql
mutation some_mutation_then_prepare_tx @twoPhaseCommit(id:"the_transaction_id", prepare:true) {
  mutation1 : ...
  mutation2 : ...
  # the transaction will be prepared after the execution of the mutation1 and mutation2 fields
}
```

#### Via the mutation field

Add a `prepare` field in a mutation operation. This field will trigger the transaction preparation.

```graphql
mutation prepare_tx @twoPhaseCommit(id:"the_transaction_id") {
  prepare
}
```

### Validate a transaction

Add a `validate` field in a mutation operation. This field will trigger the transaction validation.

```graphql
mutation validate_tx @twoPhaseCommit(id:"the_transaction_id") {
  validate
}
```

### Commit a transaction

Add a `commit` field in a mutation operation. This field will trigger the transaction commit.

```graphql
mutation commit_tx @twoPhaseCommit(id:"the_transaction_id") {
  commit
}
```

### Abort/Rollback a transaction

When you need to abort/rollback a transaction explicitly, you can use the `abort` or `rollback` mutation fields interchangeably (both have the same effect and usage). Note that you cannot mix them with any other operations, so you must specify them alone.
+ +```graphql +mutation AbortTx @twoPhaseCommit(id: "the_transaction_id") { + abort +} +``` + +or + +```graphql +mutation RollbackTx @twoPhaseCommit(id: "the_transaction_id") { + rollback +} +``` + +## Error handling + +If an exception is thrown by a `@twoPhaseCommit` operation, ScalarDB GraphQL triggers a rollback procedure that recovers the transaction. +For more details about the exception handling in two-phase commit transaction, please refer to +the [exception handling guide for ScalarDB two-phase commit transaction](https://github.com/scalar-labs/scalardb/blob/master/docs/two-phase-commit-transactions.md#handle-exceptions). diff --git a/docs/3.12/scalardb-graphql/images/cluster-iam-role.png b/docs/3.12/scalardb-graphql/images/cluster-iam-role.png new file mode 100644 index 00000000..789aecaa Binary files /dev/null and b/docs/3.12/scalardb-graphql/images/cluster-iam-role.png differ diff --git a/docs/3.12/scalardb-graphql/images/iam-role-for-serviceaccount.png b/docs/3.12/scalardb-graphql/images/iam-role-for-serviceaccount.png new file mode 100644 index 00000000..3c2052dd Binary files /dev/null and b/docs/3.12/scalardb-graphql/images/iam-role-for-serviceaccount.png differ diff --git a/docs/3.12/scalardb-graphql/images/lb-controller-iam-policy.png b/docs/3.12/scalardb-graphql/images/lb-controller-iam-policy.png new file mode 100644 index 00000000..6b709d96 Binary files /dev/null and b/docs/3.12/scalardb-graphql/images/lb-controller-iam-policy.png differ diff --git a/docs/3.12/scalardb-graphql/images/node-iam-role.png b/docs/3.12/scalardb-graphql/images/node-iam-role.png new file mode 100644 index 00000000..3f039058 Binary files /dev/null and b/docs/3.12/scalardb-graphql/images/node-iam-role.png differ diff --git a/docs/3.12/scalardb-graphql/images/oidc-provider.png b/docs/3.12/scalardb-graphql/images/oidc-provider.png new file mode 100644 index 00000000..60615e6a Binary files /dev/null and b/docs/3.12/scalardb-graphql/images/oidc-provider.png differ diff --git a/docs/3.12/scalardb-samples/README.md b/docs/3.12/scalardb-samples/README.md new file mode 100644 index 00000000..2ace35a5 --- /dev/null +++ b/docs/3.12/scalardb-samples/README.md @@ -0,0 +1,17 @@ +--- +toc: false +--- + +# ScalarDB Samples + +This repository contains sample applications for [ScalarDB](https://github.com/scalar-labs/scalardb): + +- [ScalarDB Sample](scalardb-sample/README.md) +- [Multi-storage Transaction Sample](multi-storage-transaction-sample/README.md) +- [Microservice Transaction Sample](microservice-transaction-sample/README.md) +- [ScalarDB Analytics with PostgreSQL](scalardb-analytics-postgresql-sample/README.md) +- [ScalarDB GraphQL Sample](scalardb-graphql-sample/README.md) +- [ScalarDB SQL (JDBC) Sample](scalardb-sql-jdbc-sample/README.md) +- [Spring Data JDBC for ScalarDB Sample](spring-data-sample/README.md) +- [Spring Data JDBC for ScalarDB with Multi-storage Transaction Sample](spring-data-multi-storage-transaction-sample/README.md) +- [Spring Data JDBC for ScalarDB with Microservice Transaction Sample](spring-data-microservice-transaction-sample/README.md) diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/README.md b/docs/3.12/scalardb-samples/microservice-transaction-sample/README.md new file mode 100644 index 00000000..0dd630f4 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/README.md @@ -0,0 +1,541 @@ +# Create a Sample Application That Supports Microservice Transactions + +This tutorial describes how to create a sample application that 
supports microservice transactions in ScalarDB. + +## Overview + +The sample e-commerce application shows how users can order and pay for items by using a line of credit. The use case described in this tutorial is the same as the basic [ScalarDB sample](../scalardb-sample/README.md) but takes advantage of [transactions with a two-phase commit interface](https://github.com/scalar-labs/scalardb/tree/master/docs/two-phase-commit-transactions.md) when using ScalarDB. + +The sample application has two microservices called the *Customer Service* and the *Order Service* based on the [database-per-service pattern](https://microservices.io/patterns/data/database-per-service.html): + +- The **Customer Service** manages customer information, including line-of-credit information, credit limit, and credit total. +- The **Order Service** is responsible for order operations like placing an order and getting order histories. + +Each service has gRPC endpoints. Clients call the endpoints, and the services call each endpoint as well. + +The databases that you will be using in the sample application are Cassandra and MySQL. The Customer Service and the Order Service use Cassandra and MySQL, respectively, through ScalarDB. + +![Overview](images/overview.png) + +As shown in the diagram, both services access a small Coordinator database used for the Consensus Commit protocol. The database is service-independent and exists for managing transaction metadata for Consensus Commit in a highly available manner. + +In the sample application, for ease of setup and explanation, we co-locate the Coordinator database in the same Cassandra instance of the Order Service. Alternatively, you can manage the Coordinator database as a separate database. + +{% capture notice--info %} +**Note** + +Since the focus of the sample application is to demonstrate using ScalarDB, application-specific error handling, authentication processing, and similar functions are not included in the sample application. For details about exception handling in ScalarDB, see [How to handle exceptions](https://github.com/scalar-labs/scalardb/blob/master/docs/api-guide.md#how-to-handle-exceptions). + +Additionally, for the purpose of the sample application, each service has one container so that you can avoid using request routing between the services. However, for production use, because each service typically has multiple servers or hosts for scalability and availability, you should consider request routing between the services in transactions with a two-phase commit interface. For details about request routing, see [Request routing in transactions with a two-phase commit interface](https://github.com/scalar-labs/scalardb/blob/master/docs/two-phase-commit-transactions.md#request-routing-in-transactions-with-a-two-phase-commit-interface). +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
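If you do manage the Coordinator database separately, the multi-storage configuration used by the services (see `customer-service.properties` in this sample) can map the `coordinator` namespace to a dedicated storage. The following is only a minimal sketch under that assumption; the storage name `coordinator_db` and the host `coordinator-cassandra` are hypothetical placeholders, not part of this sample:

```properties
# Minimal sketch only: run the Coordinator tables on their own storage instead of
# co-locating them with the Order Service's Cassandra. The storage name `coordinator_db`
# and the host `coordinator-cassandra` are hypothetical placeholders.
scalar.db.storage=multi-storage
scalar.db.multi_storage.storages=cassandra,mysql,coordinator_db
# (the `cassandra` and `mysql` storages are defined as in `customer-service.properties`)

# Dedicated storage for the Coordinator tables
scalar.db.multi_storage.storages.coordinator_db.storage=cassandra
scalar.db.multi_storage.storages.coordinator_db.contact_points=coordinator-cassandra
scalar.db.multi_storage.storages.coordinator_db.username=cassandra
scalar.db.multi_storage.storages.coordinator_db.password=cassandra

# Map the `coordinator` namespace to the dedicated storage
scalar.db.multi_storage.namespace_mapping=coordinator:coordinator_db
scalar.db.multi_storage.default_storage=mysql
```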
+ +### Service endpoints + +The endpoints defined in the services are as follows: + +- Customer Service + - `getCustomerInfo` + - `payment` + - `prepare` + - `validate` + - `commit` + - `rollback` + - `repayment` + +- Order Service + - `placeOrder` + - `getOrder` + - `getOrders` + +### What you can do in this sample application + +The sample application supports the following types of transactions: + +- Get customer information through the `getCustomerInfo` endpoint of the Customer Service. +- Place an order by using a line of credit through the `placeOrder` endpoint of the Order Service and the `payment`, `prepare`, `validate`, `commit`, and `rollback` endpoints of the Customer Service. + - Checks if the cost of the order is below the customer's credit limit. + - If the check passes, records the order history and updates the amount the customer has spent. +- Get order information by order ID through the `getOrder` endpoint of the Order Service and the `getCustomerInfo`, `prepare`, `validate`, `commit`, and `rollback` endpoints of the Customer Service. +- Get order information by customer ID through the `getOrders` endpoint of the Order Service and the `getCustomerInfo`, `prepare`, `validate`, `commit`, and `rollback` endpoints of the Customer Service. +- Make a payment through the `repayment` endpoint of the Customer Service. + - Reduces the amount the customer has spent. + +{% capture notice--info %} +**Note** + +The `getCustomerInfo` endpoint works as a participant service endpoint when receiving a transaction ID from the coordinator. + +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
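The following simplified excerpt from [`CustomerService.java`](customer-service/src/main/java/sample/customer/CustomerService.java) shows how the endpoint switches between the two modes; only the branch is shown here:

```java
// Simplified from CustomerService.java: the same read logic is executed either in a
// normal (single-service) transaction or as a participant in a transaction with a
// two-phase commit interface, depending on whether the request carries a transaction ID.
if (request.hasTransactionId()) {
  // Called by the Order Service as part of a microservice transaction:
  // join the transaction identified by the received transaction ID.
  execOperationsAsParticipant(funcName, request.getTransactionId(), operations, responseObserver);
} else {
  // Called directly by a client: run the operations in a normal transaction.
  execOperations(funcName, operations, responseObserver);
}
```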
+ +## Prerequisites + +- One of the following Java Development Kits (JDKs): + - [Oracle JDK](https://www.oracle.com/java/technologies/downloads/) LTS version (8, 11, or 17) + - [OpenJDK](https://openjdk.org/install/) LTS version (8, 11, or 17) +- [Docker](https://www.docker.com/get-started/) 20.10 or later with [Docker Compose](https://docs.docker.com/compose/install/) V2 or later + +{% capture notice--info %} +**Note** + +We recommend using the LTS versions mentioned above, but other non-LTS versions may work. + +In addition, other JDKs should work with ScalarDB, but we haven't tested them. +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +## Set up ScalarDB + +The following sections describe how to set up the sample application that supports microservices transactions in ScalarDB. + +### Clone the ScalarDB samples repository + +Open **Terminal**, then clone the ScalarDB samples repository by running the following command: + +```console +$ git clone https://github.com/scalar-labs/scalardb-samples +``` + +Then, go to the directory that contains the sample application by running the following command: + +```console +$ cd scalardb-samples/microservice-transaction-sample +``` + +### Start Cassandra and MySQL + +Cassandra and MySQL are already configured for the sample application, as shown in [`database.properties`](database.properties). For details about configuring the multi-storage transactions feature in ScalarDB, see [How to configure ScalarDB to support multi-storage transactions](https://github.com/scalar-labs/scalardb/blob/master/docs/multi-storage-transactions.md#how-to-configure-scalardb-to-support-multi-storage-transactions). + +To start Cassandra and MySQL, which are included in the Docker container for the sample application, run the following command: + +```console +$ docker-compose up -d mysql cassandra +``` + +{% capture notice--info %} +**Note** + +Starting the Docker container may take more than one minute depending on your development environment. +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +### Load the schema + +The database schema (the method in which the data will be organized) for the sample application has already been defined in [`customer-service-schema.json`](customer-service-schema.json) for the Customer Service and [`order-service-schema.json`](order-service-schema.json) for the Order Service. + +To apply the schema, go to the [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases) page and download the ScalarDB Schema Loader that matches the version of ScalarDB that you want to use to the `scalardb-samples/microservice-transaction-sample` folder. + +#### MySQL + +To load the schema for [`customer-service-schema.json`](customer-service-schema.json) into MySQL, run the following command, replacing `` with the version of the ScalarDB Schema Loader that you downloaded: + +```console +$ java -jar scalardb-schema-loader-.jar --config database-mysql.properties --schema-file customer-service-schema.json +``` + +#### Cassandra + +To load the schema for [`order-service-schema.json`](order-service-schema.json) into Cassandra, run the following command, replacing `` with the version of the ScalarDB Schema Loader that you downloaded: + +```console +$ java -jar scalardb-schema-loader-.jar --config database-cassandra.properties --schema-file order-service-schema.json --coordinator +``` + +#### Schema details + +As shown in [`customer-service-schema.json`](customer-service-schema.json) for the sample application, all the tables for the Customer Service are created in the `customer_service` namespace. + +- `customer_service.customers`: a table that manages customers' information + - `credit_limit`: the maximum amount of money a lender will allow each customer to spend when using a line of credit + - `credit_total`: the amount of money that each customer has already spent by using their line of credit + +As shown in [`order-service-schema.json`](order-service-schema.json) for the sample application, all the tables for the Order Service are created in the `order_service` namespace. + +- `order_service.orders`: a table that manages order information +- `order_service.statements`: a table that manages order statement information +- `order_service.items`: a table that manages information of items to be ordered + +The Entity Relationship Diagram for the schema is as follows: + +![ERD](images/ERD.png) + +### Load the initial data by starting the microservices + +Before starting the microservices, build the Docker images of the sample application by running the following command: + +```console +$ ./gradlew docker +``` + +Then, start the microservices by running the following command: + +```console +$ docker-compose up -d customer-service order-service +``` + +After starting the microservices and the initial data has loaded, the following records should be stored in the `customer_service.customers` table: + +| customer_id | name | credit_limit | credit_total | +|-------------|---------------|--------------|--------------| +| 1 | Yamada Taro | 10000 | 0 | +| 2 | Yamada Hanako | 10000 | 0 | +| 3 | Suzuki Ichiro | 10000 | 0 | + +And the following records should be stored in the `order_service.items` table: + +| item_id | name | price | +|---------|--------|-------| +| 1 | Apple | 1000 | +| 2 | Orange | 2000 | +| 3 | Grape | 2500 | +| 4 | Mango | 5000 | +| 5 | Melon | 3000 | + +## Execute transactions and retrieve data in the sample application + +The following sections describe how to execute transactions and retrieve data in the sample e-commerce application. 
+ +### Get customer information + +Start with getting information about the customer whose ID is `1` by running the following command: + +```console +$ ./gradlew :client:run --args="GetCustomerInfo 1" +``` + +You should see the following output: + +```console +... +{"id": 1,"name": "Yamada Taro","credit_limit": 10000} +... +``` + +At this time, `credit_total` isn't shown, which means the current value of `credit_total` is `0`. + +### Place an order + +Then, have customer ID `1` place an order for three apples and two oranges by running the following command: + +{% capture notice--info %} +**Note** + +The order format in this command is `./gradlew run --args="PlaceOrder :,:,..."`. +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +```console +$ ./gradlew :client:run --args="PlaceOrder 1 1:3,2:2" +``` + +You should see a similar output as below, with a different UUID for `order_id`, which confirms that the order was successful: + +```console +... +{"order_id": "4ccdb21c-ac03-4b48-bcb7-cad57eac1e79"} +... +``` + +### Check order details + +Check details about the order by running the following command, replacing `` with the UUID for the `order_id` that was shown after running the previous command: + +```console +$ ./gradlew :client:run --args="GetOrder " +``` + +You should see a similar output as below, with different UUIDs for `order_id` and `timestamp`: + +```console +... +{"order": {"order_id": "4ccdb21c-ac03-4b48-bcb7-cad57eac1e79","timestamp": 1631605253126,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 1,"item_name": "Apple","price": 1000,"count": 3,"total": 3000},{"item_id": 2,"item_name": "Orange","price": 2000,"count": 2,"total": 4000}],"total": 7000}} +... +``` + +### Place another order + +Place an order for one melon that uses the remaining amount in `credit_total` for customer ID `1` by running the following command: + +```console +$ ./gradlew :client:run --args="PlaceOrder 1 5:1" +``` + +You should see a similar output as below, with a different UUID for `order_id`, which confirms that the order was successful: + +```console +... +{"order_id": "0b10db66-faa6-4323-8a7a-474e8534a7ee"} +... +``` + +### Check order history + +Get the history of all orders for customer ID `1` by running the following command: + +```console +$ ./gradlew :client:run --args="GetOrders 1" +``` + +You should see a similar output as below, with different UUIDs for `order_id` and `timestamp`, which shows the history of all orders for customer ID `1` in descending order by timestamp: + +```console +... +{"order": [{"order_id": "0b10db66-faa6-4323-8a7a-474e8534a7ee","timestamp": 1631605501485,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 5,"item_name": "Melon","price": 3000,"count": 1,"total": 3000}],"total": 3000},{"order_id": "4ccdb21c-ac03-4b48-bcb7-cad57eac1e79","timestamp": 1631605253126,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 1,"item_name": "Apple","price": 1000,"count": 3,"total": 3000},{"item_id": 2,"item_name": "Orange","price": 2000,"count": 2,"total": 4000}],"total": 7000}]} +... +``` + +### Check credit total + +Get the credit total for customer ID `1` by running the following command: + +```console +$ ./gradlew :client:run --args="GetCustomerInfo 1" +``` + +You should see the following output, which shows that customer ID `1` has reached their `credit_limit` in `credit_total` and cannot place anymore orders: + +```console +... +{"id": 1,"name": "Yamada Taro","credit_limit": 10000,"credit_total": 10000} +... +``` + +Try to place an order for one grape and one mango by running the following command: + +```console +$ ./gradlew :client:run --args="PlaceOrder 1 3:1,4:1" +``` + +You should see the following output, which shows that the order failed because the `credit_total` amount would exceed the `credit_limit` amount: + +```console +... 
+io.grpc.StatusRuntimeException: FAILED_PRECONDITION: Credit limit exceeded + at io.grpc.stub.ClientCalls.toStatusRuntimeException(ClientCalls.java:271) + at io.grpc.stub.ClientCalls.getUnchecked(ClientCalls.java:252) + at io.grpc.stub.ClientCalls.blockingUnaryCall(ClientCalls.java:165) + at sample.rpc.OrderServiceGrpc$OrderServiceBlockingStub.placeOrder(OrderServiceGrpc.java:296) + at sample.client.command.PlaceOrderCommand.call(PlaceOrderCommand.java:38) + at sample.client.command.PlaceOrderCommand.call(PlaceOrderCommand.java:12) + at picocli.CommandLine.executeUserObject(CommandLine.java:2041) + at picocli.CommandLine.access$1500(CommandLine.java:148) + at picocli.CommandLine$RunLast.executeUserObjectOfLastSubcommandWithSameParent(CommandLine.java:2461) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2453) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2415) + at picocli.CommandLine$AbstractParseResultHandler.execute(CommandLine.java:2273) + at picocli.CommandLine$RunLast.execute(CommandLine.java:2417) + at picocli.CommandLine.execute(CommandLine.java:2170) + at sample.client.Client.main(Client.java:39) +... +``` + +### Make a payment + +To continue making orders, customer ID `1` must make a payment to reduce the `credit_total` amount. + +Make a payment by running the following command: + +```console +$ ./gradlew :client:run --args="Repayment 1 8000" +``` + +Then, check the `credit_total` amount for customer ID `1` by running the following command: + +```console +$ ./gradlew :client:run --args="GetCustomerInfo 1" +``` + +You should see the following output, which shows that a payment was applied to customer ID `1`, reducing the `credit_total` amount: + +```console +... +{"id": 1,"name": "Yamada Taro","credit_limit": 10000,"credit_total": 2000} +... +``` + +Now that customer ID `1` has made a payment, place an order for one grape and one melon by running the following command: + +```console +$ ./gradlew :client:run --args="PlaceOrder 1 3:1,4:1" +``` + +You should see a similar output as below, with a different UUID for `order_id`, which confirms that the order was successful: + +```console +... +{"order_id": "dd53dd9d-aaf4-41db-84b2-56951fed6425"} +... +``` + +## Stop the sample application + +To stop the sample application, you need to stop the Docker containers that are running Cassandra, MySQL, and the microservices. To stop the Docker containers, run the following command: + +```console +$ docker-compose down +``` + +## Reference - How the microservice transaction is achieved + +The transactions for placing an order, getting a single order, and getting the history of orders achieve the microservice transaction. This section focuses on how the transactions that span the Customer Service and the Order Service are implemented by placing an order as an example. + +The following sequence diagram shows the transaction for placing an order: + +![Microservice transaction sequence diagram](images/sequence_diagram.png) + +### 1. Transaction with a two-phase commit interface is started + +When a client sends a request to place an order to the Order Service, `OrderService.placeOrder()` is called, and the microservice transaction starts. + +At first, the Order Service starts a transaction with a two-phase commit interface with `start()` as follows. For reference, see [`OrderService.java`](order-service/src/main/java/sample/order/OrderService.java). + +```java +transaction = twoPhaseCommitTransactionManager.start(); +``` + +### 2. 
CRUD operations are executed + +After the transaction with a two-phase commit interface starts, CRUD operations are executed. The Order Service puts the order information in the `order_service.orders` table and the detailed information in the `order_service.statements` table as follows. For reference, see [`OrderService.java`](order-service/src/main/java/sample/order/OrderService.java): + +```java +// Put the order info into the `orders` table. +Order.put(transaction, orderId, request.getCustomerId(), System.currentTimeMillis()); + +int amount = 0; +for (ItemOrder itemOrder : request.getItemOrderList()) { + // Put the order statement into the `statements` table. + Statement.put(transaction, orderId, itemOrder.getItemId(), itemOrder.getCount()); + + // Retrieve the item info from the `items` table. + Optional item = Item.get(transaction, itemOrder.getItemId()); + if (!item.isPresent()) { + responseObserver.onError( + Status.NOT_FOUND.withDescription("Item not found").asRuntimeException()); + return; + } + + // Calculate the total amount. + amount += item.get().price * itemOrder.getCount(); +} +``` + +Then, the Order Service calls the `payment` gRPC endpoint of the Customer Service along with the transaction ID. For reference, see [`OrderService.java`](order-service/src/main/java/sample/order/OrderService.java). + +```java +customerServiceStub.payment( + PaymentRequest.newBuilder() + .setTransactionId(transactionId) + .setCustomerId(customerId) + .setAmount(amount) + .build()); +``` + +The `payment` endpoint of the Customer Service first joins the transaction with `join()` as follows. For reference, see [`CustomerService.java`](customer-service/src/main/java/sample/customer/CustomerService.java). + +```java +private void execOperationsAsParticipant(String funcName, String transactionId, + TransactionFunction operations, + StreamObserver responseObserver) { + try { + // Join the transaction + TwoPhaseCommitTransaction transaction = twoPhaseCommitTransactionManager.join(transactionId); + + // Execute operations + T response = operations.apply(transaction); +``` + +The endpoint then gets the customer information and checks if the customer's credit total exceeds the credit limit after the payment. If the credit total does not exceed the credit limit, the endpoint updates the customer's credit total. For reference, see [`CustomerService.java`](customer-service/src/main/java/sample/customer/CustomerService.java). + +```java +execOperationsAsParticipant("Payment", request.getTransactionId(), + transaction -> { + // Retrieve the customer info for the customer ID + Optional result = Customer.get(transaction, request.getCustomerId()); + if (!result.isPresent()) { + throw Status.NOT_FOUND.withDescription("Customer not found").asRuntimeException(); + } + + int updatedCreditTotal = result.get().creditTotal + request.getAmount(); + + // Check if the credit total exceeds the credit limit after payment + if (updatedCreditTotal > result.get().creditLimit) { + throw Status.FAILED_PRECONDITION + .withDescription("Credit limit exceeded") + .asRuntimeException(); + } + + // Update `credit_total` for the customer + Customer.updateCreditTotal(transaction, request.getCustomerId(), updatedCreditTotal); + + return Empty.getDefaultInstance(); + }, responseObserver +); +``` + +### 3. Transaction is committed by using the two-phase commit protocol + +After the Order Service receives the update that the payment succeeded, the Order Service tries to commit the transaction. 
+ +To commit the transaction, the Order Service starts with preparing the transaction. The Order Service calls `prepare()` from its transaction object and calls the `prepare` gRPC endpoint of the Customer Service. For reference, see [`OrderService.java`](order-service/src/main/java/sample/order/OrderService.java): + +```java +transaction.prepare(); +callPrepareEndpoint(transaction.getId()); +``` + +In this endpoint, the Customer Service resumes the transaction and calls `prepare()` from its transaction object, as well. For reference, see [`CustomerService.java`](customer-service/src/main/java/sample/customer/CustomerService.java): + +```java +// Resume the transaction. +transaction = twoPhaseCommitTransactionManager.resume(request.getTransactionId()); + +// Prepare the transaction. +transaction.prepare(); +``` + +Similarly, the Order Service and the Customer Service call `validate()` from their transaction objects. For reference, see [`OrderService.java`](order-service/src/main/java/sample/order/OrderService.java) and [`CustomerService.java`](customer-service/src/main/java/sample/customer/CustomerService.java). For details about `validate()`, see [Validate the transaction](https://github.com/scalar-labs/scalardb/blob/master/docs/two-phase-commit-transactions.md#validate-the-transaction). + +After preparing and validating the transaction succeeds in both the Order Service and the Customer Service, the transaction can be committed. In this case, the Order Service calls `commit()` from its transaction object and then calls the `commit` gRPC endpoint of the Customer Service. For reference, see [`OrderService.java`](order-service/src/main/java/sample/order/OrderService.java). + +```java +transaction.commit(); +callCommitEndpoint(transaction.getId()); +``` + +In this endpoint, the Customer Service resumes the transaction and calls `commit()` from its transaction object, as well. For reference, see [`CustomerService.java`](customer-service/src/main/java/sample/customer/CustomerService.java). + +```java +// Resume the transaction. +transaction = twoPhaseCommitTransactionManager.resume(request.getTransactionId()); + +// Commit the transaction. +transaction.commit(); +``` + +### Error handling + +If an error happens while executing a transaction, you will need to roll back the transaction. In this case, the Order Service calls `rollback()` from its transaction object and then calls the `rollback` gRPC endpoint of the Customer Service. For reference, see [`OrderService.java`](order-service/src/main/java/sample/order/OrderService.java). + +```java +transaction.rollback(); +callRollbackEndpoint(transaction.getId()); +``` + +In this endpoint, the Customer Service resumes the transaction and calls `rollback()` from its transaction object, as well. For reference, see [`CustomerService.java`](customer-service/src/main/java/sample/customer/CustomerService.java). + +```java +// Resume the transaction. +TwoPhaseCommitTransaction transaction = + twoPhaseCommitTransactionManager.resume(request.getTransactionId()); + +// Roll back the transaction. +transaction.rollback(); +``` + +For details about how to handle exceptions in ScalarDB, see [How to handle exceptions](https://github.com/scalar-labs/scalardb/blob/master/docs/api-guide.md#how-to-handle-exceptions). 
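
Putting the pieces above together, the coordinator-side flow in `placeOrder()` is roughly as follows. This is a condensed sketch rather than the actual implementation in [`OrderService.java`](order-service/src/main/java/sample/order/OrderService.java); in particular, `callValidateEndpoint()` is an assumed helper named by analogy with `callPrepareEndpoint()` and `callCommitEndpoint()`, and error handling is simplified.

```java
// Condensed sketch of the coordinator side (Order Service). Not the actual code:
// callValidateEndpoint() is an assumed helper name, and error handling is simplified.
TwoPhaseCommitTransaction transaction = null;
try {
  // 1. Start a transaction with a two-phase commit interface.
  transaction = twoPhaseCommitTransactionManager.start();

  // 2. Execute CRUD operations locally and call the Customer Service's `payment`
  //    endpoint with the transaction ID so that it joins the same transaction.
  //    (See the CRUD and payment excerpts above.)

  // 3. Prepare, validate, and commit in both services.
  transaction.prepare();
  callPrepareEndpoint(transaction.getId());

  transaction.validate();
  callValidateEndpoint(transaction.getId());

  transaction.commit();
  callCommitEndpoint(transaction.getId());
} catch (Exception e) {
  // If anything fails, roll back the transaction in both services.
  if (transaction != null) {
    try {
      transaction.rollback();
      callRollbackEndpoint(transaction.getId());
    } catch (Exception rollbackError) {
      // Log and give up; see OrderService.java for the actual error handling.
    }
  }
}
```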
diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/build.gradle b/docs/3.12/scalardb-samples/microservice-transaction-sample/build.gradle new file mode 100644 index 00000000..36ec85d7 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/build.gradle @@ -0,0 +1,17 @@ +subprojects { + group = "sample" + project.version = '1.0' + + ext { + grpcVersion = '1.53.0' + protocVersion = '3.23.1' + scalarDbVersion = '3.9.1' + picoCliVersion = '4.7.1' + protobufJavaFormatVersion = '1.4' + log4jVersion = '2.20.0' + } + + repositories { + mavenCentral() + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/Client.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/Client.class new file mode 100644 index 00000000..c00f001c Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/Client.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/GetCustomerInfoCommand.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/GetCustomerInfoCommand.class new file mode 100644 index 00000000..ef1160d4 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/GetCustomerInfoCommand.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/GetOrderCommand.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/GetOrderCommand.class new file mode 100644 index 00000000..098395f3 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/GetOrderCommand.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/GetOrdersCommand.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/GetOrdersCommand.class new file mode 100644 index 00000000..3b85ef0f Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/GetOrdersCommand.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/PlaceOrderCommand.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/PlaceOrderCommand.class new file mode 100644 index 00000000..27876c40 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/PlaceOrderCommand.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/RepaymentCommand.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/RepaymentCommand.class new file mode 100644 index 00000000..986857b2 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/RepaymentCommand.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/Utils.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/Utils.class new file mode 
100644 index 00000000..e1ca381e Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/bin/main/sample/client/command/Utils.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/client/build.gradle b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/build.gradle new file mode 100644 index 00000000..d9e03945 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/build.gradle @@ -0,0 +1,19 @@ +plugins { + id 'java' + id 'application' +} + +dependencies { + implementation project(':rpc') + implementation "info.picocli:picocli:${picoCliVersion}" + implementation "com.googlecode.protobuf-java-format:protobuf-java-format:${protobufJavaFormatVersion}" +} + +application { + mainClassName = 'sample.client.Client' +} + +archivesBaseName = "sample-order-service" + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/Client.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/Client.java new file mode 100644 index 00000000..02c2a2c5 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/Client.java @@ -0,0 +1,41 @@ +package sample.client; + +import sample.client.command.GetCustomerInfoCommand; +import sample.client.command.GetOrderCommand; +import sample.client.command.GetOrdersCommand; +import sample.client.command.PlaceOrderCommand; +import sample.client.command.RepaymentCommand; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Option; + +@Command( + name = "bin/client", + description = "Sample application for Microservice Transaction", + subcommands = { + PlaceOrderCommand.class, + GetOrderCommand.class, + GetOrdersCommand.class, + GetCustomerInfoCommand.class, + RepaymentCommand.class + }) +public class Client implements Runnable { + + @Option( + names = {"-h", "--help"}, + usageHelp = true, + description = "Displays this help message and quits.", + defaultValue = "true") + private Boolean showHelp; + + @Override + public void run() { + if (showHelp) { + CommandLine.usage(this, System.out); + } + } + + public static void main(String[] args) { + new CommandLine(new Client()).execute(args); + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/GetCustomerInfoCommand.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/GetCustomerInfoCommand.java new file mode 100644 index 00000000..b8d03293 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/GetCustomerInfoCommand.java @@ -0,0 +1,35 @@ +package sample.client.command; + +import io.grpc.ManagedChannel; +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.rpc.CustomerServiceGrpc; +import sample.rpc.GetCustomerInfoRequest; +import sample.rpc.GetCustomerInfoResponse; + +@Command(name = "GetCustomerInfo", description = "Get customer information") +public class GetCustomerInfoCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Override + public Integer call() { + ManagedChannel channel = Utils.getCustomerServiceChannel(); + 
try { + CustomerServiceGrpc.CustomerServiceBlockingStub stub = + CustomerServiceGrpc.newBlockingStub(channel); + GetCustomerInfoResponse response = + stub.getCustomerInfo( + GetCustomerInfoRequest.newBuilder().setCustomerId(customerId).build()); + Utils.printJsonString(response); + return 0; + } catch (Exception e) { + e.printStackTrace(); + return 1; + } finally { + Utils.shutdownChannel(channel); + } + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/GetOrderCommand.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/GetOrderCommand.java new file mode 100644 index 00000000..7af8ff1c --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/GetOrderCommand.java @@ -0,0 +1,33 @@ +package sample.client.command; + +import io.grpc.ManagedChannel; +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.rpc.GetOrderRequest; +import sample.rpc.GetOrderResponse; +import sample.rpc.OrderServiceGrpc; + +@Command(name = "GetOrder", description = "Get order information by order ID") +public class GetOrderCommand implements Callable { + + @Parameters(index = "0", paramLabel = "ORDER_ID", description = "order ID") + private String orderId; + + @Override + public Integer call() { + ManagedChannel channel = Utils.getOrderServiceChannel(); + try { + OrderServiceGrpc.OrderServiceBlockingStub stub = OrderServiceGrpc.newBlockingStub(channel); + GetOrderResponse response = + stub.getOrder(GetOrderRequest.newBuilder().setOrderId(orderId).build()); + Utils.printJsonString(response); + return 0; + } catch (Exception e) { + e.printStackTrace(); + return 1; + } finally { + Utils.shutdownChannel(channel); + } + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/GetOrdersCommand.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/GetOrdersCommand.java new file mode 100644 index 00000000..430518f8 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/GetOrdersCommand.java @@ -0,0 +1,33 @@ +package sample.client.command; + +import io.grpc.ManagedChannel; +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.rpc.GetOrdersRequest; +import sample.rpc.GetOrdersResponse; +import sample.rpc.OrderServiceGrpc; + +@Command(name = "GetOrders", description = "Get order information by customer ID") +public class GetOrdersCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Override + public Integer call() { + ManagedChannel channel = Utils.getOrderServiceChannel(); + try { + OrderServiceGrpc.OrderServiceBlockingStub stub = OrderServiceGrpc.newBlockingStub(channel); + GetOrdersResponse response = + stub.getOrders(GetOrdersRequest.newBuilder().setCustomerId(customerId).build()); + Utils.printJsonString(response); + return 0; + } catch (Exception e) { + e.printStackTrace(); + return 1; + } finally { + Utils.shutdownChannel(channel); + } + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/PlaceOrderCommand.java 
b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/PlaceOrderCommand.java new file mode 100644 index 00000000..5c0c8223 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/PlaceOrderCommand.java @@ -0,0 +1,49 @@ +package sample.client.command; + +import io.grpc.ManagedChannel; +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.rpc.ItemOrder; +import sample.rpc.OrderServiceGrpc; +import sample.rpc.PlaceOrderRequest; +import sample.rpc.PlaceOrderResponse; + +@Command(name = "PlaceOrder", description = "Place an order") +public class PlaceOrderCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Parameters( + index = "1", + paramLabel = "ORDERS", + description = "orders. The format is \":,:,...\"") + private String orders; + + @Override + public Integer call() { + ManagedChannel channel = Utils.getOrderServiceChannel(); + try { + OrderServiceGrpc.OrderServiceBlockingStub stub = OrderServiceGrpc.newBlockingStub(channel); + + PlaceOrderRequest.Builder builder = PlaceOrderRequest.newBuilder().setCustomerId(customerId); + for (String order : orders.split(",", -1)) { + String[] s = order.split(":", -1); + int itemId = Integer.parseInt(s[0]); + int count = Integer.parseInt(s[1]); + builder.addItemOrder(ItemOrder.newBuilder().setItemId(itemId).setCount(count).build()); + } + + PlaceOrderResponse response = stub.placeOrder(builder.build()); + + Utils.printJsonString(response); + return 0; + } catch (Exception e) { + e.printStackTrace(); + return 1; + } finally { + Utils.shutdownChannel(channel); + } + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/RepaymentCommand.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/RepaymentCommand.java new file mode 100644 index 00000000..fa447abc --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/RepaymentCommand.java @@ -0,0 +1,38 @@ +package sample.client.command; + +import com.google.protobuf.Empty; +import io.grpc.ManagedChannel; +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.rpc.CustomerServiceGrpc; +import sample.rpc.RepaymentRequest; + +@Command(name = "Repayment", description = "Repayment") +public class RepaymentCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Parameters(index = "1", paramLabel = "AMOUNT", description = "amount of the money for repayment") + private int amount; + + @Override + public Integer call() { + ManagedChannel channel = Utils.getCustomerServiceChannel(); + try { + CustomerServiceGrpc.CustomerServiceBlockingStub stub = + CustomerServiceGrpc.newBlockingStub(channel); + Empty response = + stub.repayment( + RepaymentRequest.newBuilder().setCustomerId(customerId).setAmount(amount).build()); + Utils.printJsonString(response); + return 0; + } catch (Exception e) { + e.printStackTrace(); + return 1; + } finally { + Utils.shutdownChannel(channel); + } + } +} diff --git 
a/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/Utils.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/Utils.java new file mode 100644 index 00000000..2594563f --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/client/src/main/java/sample/client/command/Utils.java @@ -0,0 +1,34 @@ +package sample.client.command; + +import com.google.protobuf.Message; +import com.googlecode.protobuf.format.JsonFormat; +import io.grpc.ManagedChannel; +import io.grpc.netty.NettyChannelBuilder; +import java.util.concurrent.TimeUnit; + +public final class Utils { + + private static final JsonFormat JSON_FORMAT = new JsonFormat(); + + private Utils() {} + + public static ManagedChannel getCustomerServiceChannel() { + return NettyChannelBuilder.forAddress("localhost", 10010).usePlaintext().build(); + } + + public static ManagedChannel getOrderServiceChannel() { + return NettyChannelBuilder.forAddress("localhost", 10020).usePlaintext().build(); + } + + public static void shutdownChannel(ManagedChannel channel) { + try { + channel.shutdown().awaitTermination(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + System.err.println("failed to shutdown the channel"); + } + } + + public static void printJsonString(Message message) { + System.out.println(JSON_FORMAT.printToString(message)); + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service-schema.json b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service-schema.json new file mode 100644 index 00000000..6ed73ddb --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service-schema.json @@ -0,0 +1,14 @@ +{ + "customer_service.customers": { + "transaction": true, + "partition-key": [ + "customer_id" + ], + "columns": { + "customer_id": "INT", + "name": "TEXT", + "credit_limit": "INT", + "credit_total": "INT" + } + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/Dockerfile b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/Dockerfile new file mode 100644 index 00000000..ea0686ae --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/Dockerfile @@ -0,0 +1,14 @@ +FROM docker.io/busybox:1.32 AS tools +FROM openjdk:8u292-jre-slim + +WORKDIR / + +ADD customer-service.tar . 
+ +WORKDIR /customer-service + +COPY customer-service.properties database.properties + +ENTRYPOINT ["./bin/customer-service", "--config", "database.properties"] + +EXPOSE 10010 diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/bin/main/sample/customer/CustomerService.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/bin/main/sample/customer/CustomerService.class new file mode 100644 index 00000000..3afe634f Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/bin/main/sample/customer/CustomerService.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/bin/main/sample/customer/CustomerServiceServer.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/bin/main/sample/customer/CustomerServiceServer.class new file mode 100644 index 00000000..cc823d8f Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/bin/main/sample/customer/CustomerServiceServer.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/bin/main/sample/customer/model/Customer.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/bin/main/sample/customer/model/Customer.class new file mode 100644 index 00000000..27c7d95c Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/bin/main/sample/customer/model/Customer.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/build.gradle b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/build.gradle new file mode 100644 index 00000000..06cef10f --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/build.gradle @@ -0,0 +1,42 @@ +plugins { + id 'java' + id 'java-library-distribution' + id 'application' + id 'com.palantir.docker' version '0.25.0' +} + +dependencies { + implementation project(':rpc') + implementation "com.scalar-labs:scalardb:${scalarDbVersion}" + implementation "info.picocli:picocli:${picoCliVersion}" + implementation "org.apache.logging.log4j:log4j-api:${log4jVersion}" + implementation "org.apache.logging.log4j:log4j-core:${log4jVersion}" + implementation "org.apache.logging.log4j:log4j-slf4j-impl:${log4jVersion}" +} + +application { + mainClassName = 'sample.customer.CustomerServiceServer' +} + +docker { + name "sample-customer-service:${project.version}" + files tasks.distTar.outputs, 'customer-service.properties' +} + +distTar { + archiveFileName = "${project.name}.tar" + duplicatesStrategy DuplicatesStrategy.EXCLUDE +} + +distZip { + duplicatesStrategy DuplicatesStrategy.EXCLUDE +} + +installDist { + duplicatesStrategy DuplicatesStrategy.EXCLUDE +} + +archivesBaseName = "sample-customer-service" + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/customer-service.properties b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/customer-service.properties new file mode 100644 index 00000000..5ab819c3 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/customer-service.properties @@ -0,0 +1,17 @@ +scalar.db.storage=multi-storage +scalar.db.multi_storage.storages=cassandra,mysql + +# Cassandra for the transaction tables 
+scalar.db.multi_storage.storages.cassandra.storage=cassandra +scalar.db.multi_storage.storages.cassandra.contact_points=cassandra +scalar.db.multi_storage.storages.cassandra.username=cassandra +scalar.db.multi_storage.storages.cassandra.password=cassandra + +# MySQL for the customer service tables +scalar.db.multi_storage.storages.mysql.storage=jdbc +scalar.db.multi_storage.storages.mysql.contact_points=jdbc:mysql://mysql:3306/ +scalar.db.multi_storage.storages.mysql.username=root +scalar.db.multi_storage.storages.mysql.password=mysql + +scalar.db.multi_storage.namespace_mapping=coordinator:cassandra +scalar.db.multi_storage.default_storage=mysql diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/src/main/java/sample/customer/CustomerService.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/src/main/java/sample/customer/CustomerService.java new file mode 100644 index 00000000..c3bb5a89 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/src/main/java/sample/customer/CustomerService.java @@ -0,0 +1,318 @@ +package sample.customer; + +import com.google.protobuf.Empty; +import com.scalar.db.api.DistributedTransaction; +import com.scalar.db.api.DistributedTransactionManager; +import com.scalar.db.api.TransactionCrudOperable; +import com.scalar.db.api.TwoPhaseCommitTransaction; +import com.scalar.db.api.TwoPhaseCommitTransactionManager; +import com.scalar.db.exception.transaction.AbortException; +import com.scalar.db.exception.transaction.CrudException; +import com.scalar.db.exception.transaction.TransactionException; +import com.scalar.db.exception.transaction.TransactionNotFoundException; +import com.scalar.db.service.TransactionFactory; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.stub.StreamObserver; +import java.io.Closeable; +import java.io.IOException; +import java.util.Optional; +import javax.annotation.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import sample.customer.model.Customer; +import sample.rpc.CommitRequest; +import sample.rpc.CustomerServiceGrpc; +import sample.rpc.GetCustomerInfoRequest; +import sample.rpc.GetCustomerInfoResponse; +import sample.rpc.PaymentRequest; +import sample.rpc.PrepareRequest; +import sample.rpc.RepaymentRequest; +import sample.rpc.RollbackRequest; +import sample.rpc.ValidateRequest; + +public class CustomerService extends CustomerServiceGrpc.CustomerServiceImplBase + implements Closeable { + private static final Logger logger = LoggerFactory.getLogger(CustomerService.class); + + // For normal transactions + private final DistributedTransactionManager transactionManager; + + // For two-phase commit transactions + private final TwoPhaseCommitTransactionManager twoPhaseCommitTransactionManager; + + private interface TransactionFunction { + R apply(T t) throws TransactionException; + } + + public CustomerService(String configFile) throws TransactionException, IOException { + // Initialize the transaction managers + TransactionFactory factory = TransactionFactory.create(configFile); + transactionManager = factory.getTransactionManager(); + twoPhaseCommitTransactionManager = factory.getTwoPhaseCommitTransactionManager(); + + loadInitialData(); + } + + private void loadInitialData() throws TransactionException { + DistributedTransaction transaction = null; + try { + transaction = transactionManager.start(); + loadCustomerIfNotExists(transaction, 1, "Yamada Taro", 10000, 0); + 
loadCustomerIfNotExists(transaction, 2, "Yamada Hanako", 10000, 0); + loadCustomerIfNotExists(transaction, 3, "Suzuki Ichiro", 10000, 0); + transaction.commit(); + } catch (TransactionException e) { + logger.error("Loading initial data failed", e); + abortTransaction(transaction); + throw e; + } + } + + private void loadCustomerIfNotExists( + DistributedTransaction transaction, int id, String name, int creditLimit, int creditTotal) + throws CrudException { + Optional customer = Customer.get(transaction, id); + if (!customer.isPresent()) { + Customer.put(transaction, id, name, creditLimit, creditTotal); + } + } + + @Override + public void getCustomerInfo( + GetCustomerInfoRequest request, StreamObserver responseObserver) { + String funcName = "Getting customer info"; + // This function processing operations can be used in both normal transaction and two-phase + // interface transaction. + TransactionFunction operations = + transaction -> { + // Retrieve the customer info for the specified customer ID + Optional result = Customer.get(transaction, request.getCustomerId()); + + if (!result.isPresent()) { + // If the customer info the specified customer ID doesn't exist, throw an exception + throw Status.NOT_FOUND.withDescription("Customer not found").asRuntimeException(); + } + + // Return the customer info + return GetCustomerInfoResponse.newBuilder() + .setId(result.get().id) + .setName(result.get().name) + .setCreditLimit(result.get().creditLimit) + .setCreditTotal(result.get().creditTotal) + .build(); + }; + + if (request.hasTransactionId()) { + execOperationsAsParticipant(funcName, request.getTransactionId(), operations, responseObserver); + } else { + execOperations(funcName, operations, responseObserver); + } + } + + @Override + public void repayment(RepaymentRequest request, StreamObserver responseObserver) { + execOperations("Repayment", + transaction -> { + // Retrieve the customer info for the specified customer ID + Optional result = Customer.get(transaction, request.getCustomerId()); + if (!result.isPresent()) { + // If the customer info the specified customer ID doesn't exist, throw an exception + throw Status.NOT_FOUND.withDescription("Customer not found").asRuntimeException(); + } + + int updatedCreditTotal = result.get().creditTotal - request.getAmount(); + + // Check if over repayment or not + if (updatedCreditTotal < 0) { + throw Status.FAILED_PRECONDITION.withDescription("Over repayment").asRuntimeException(); + } + + // Reduce credit_total for the customer + Customer.updateCreditTotal(transaction, request.getCustomerId(), updatedCreditTotal); + + return Empty.getDefaultInstance(); + }, responseObserver); + } + + private void abortTransaction(@Nullable DistributedTransaction transaction) { + if (transaction == null) { + return; + } + try { + transaction.abort(); + } catch (AbortException e) { + logger.warn("Abort failed", e); + } + } + + @Override + public void payment(PaymentRequest request, StreamObserver responseObserver) { + execOperationsAsParticipant("Payment", request.getTransactionId(), + transaction -> { + // Retrieve the customer info for the customer ID + Optional result = Customer.get(transaction, request.getCustomerId()); + if (!result.isPresent()) { + throw Status.NOT_FOUND.withDescription("Customer not found").asRuntimeException(); + } + + int updatedCreditTotal = result.get().creditTotal + request.getAmount(); + + // Check if the credit total exceeds the credit limit after payment + if (updatedCreditTotal > result.get().creditLimit) { + throw 
Status.FAILED_PRECONDITION + .withDescription("Credit limit exceeded") + .asRuntimeException(); + } + + // Update credit_total for the customer + Customer.updateCreditTotal(transaction, request.getCustomerId(), updatedCreditTotal); + + return Empty.getDefaultInstance(); + }, responseObserver + ); + } + + @Override + public void prepare(PrepareRequest request, StreamObserver responseObserver) { + try { + // Resume the transaction + TwoPhaseCommitTransaction transaction = + twoPhaseCommitTransactionManager.resume(request.getTransactionId()); + + // Prepare the transaction + transaction.prepare(); + + responseObserver.onNext(Empty.getDefaultInstance()); + responseObserver.onCompleted(); + } catch (Exception e) { + String message = "Prepare failed"; + logger.error(message, e); + responseObserver.onError( + Status.INTERNAL.withDescription(message).withCause(e).asRuntimeException()); + } + } + + @Override + public void validate(ValidateRequest request, StreamObserver responseObserver) { + try { + // Resume the transaction + TwoPhaseCommitTransaction transaction = + twoPhaseCommitTransactionManager.resume(request.getTransactionId()); + + // Validate the transaction + transaction.validate(); + + responseObserver.onNext(Empty.getDefaultInstance()); + responseObserver.onCompleted(); + } catch (Exception e) { + String message = "Validate failed"; + logger.error(message, e); + responseObserver.onError( + Status.INTERNAL.withDescription(message).withCause(e).asRuntimeException()); + } + } + + @Override + public void commit(CommitRequest request, StreamObserver responseObserver) { + try { + // Resume the transaction + TwoPhaseCommitTransaction transaction = + twoPhaseCommitTransactionManager.resume(request.getTransactionId()); + + // Commit the transaction + transaction.commit(); + + responseObserver.onNext(Empty.getDefaultInstance()); + responseObserver.onCompleted(); + } catch (Exception e) { + String message = "Commit failed"; + logger.error(message, e); + responseObserver.onError( + Status.INTERNAL.withDescription(message).withCause(e).asRuntimeException()); + } + } + + @Override + public void rollback(RollbackRequest request, StreamObserver responseObserver) { + try { + // Resume the transaction + TwoPhaseCommitTransaction transaction = + twoPhaseCommitTransactionManager.resume(request.getTransactionId()); + + // Rollback the transaction + transaction.rollback(); + + responseObserver.onNext(Empty.getDefaultInstance()); + responseObserver.onCompleted(); + } catch (TransactionNotFoundException e) { + // If the transaction is not found, ignore it + responseObserver.onNext(Empty.getDefaultInstance()); + responseObserver.onCompleted(); + } catch (Exception e) { + String message = "Rollback failed"; + logger.error(message, e); + responseObserver.onError( + Status.INTERNAL.withDescription(message).withCause(e).asRuntimeException()); + } + } + + private void execOperations(String funcName, + TransactionFunction operations, StreamObserver responseObserver) { + DistributedTransaction transaction = null; + try { + // Start a transaction + transaction = transactionManager.start(); + + // Execute operations + T response = operations.apply(transaction); + + // Commit the transaction (even when the transaction is read-only, we need to commit) + transaction.commit(); + + // Return the response + responseObserver.onNext(response); + responseObserver.onCompleted(); + } catch (StatusRuntimeException e) { + logger.error("{} failed", funcName, e); + abortTransaction(transaction); + responseObserver.onError(e); + } 
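// A StatusRuntimeException raised by the operations carries an application-level gRPC status (for example, NOT_FOUND or FAILED_PRECONDITION), so the transaction is aborted and that status is returned to the client as-is above; any other exception falls through to the handler below and is reported as INTERNAL.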
catch (Exception e) { + String message = funcName + " failed"; + logger.error(message, e); + abortTransaction(transaction); + responseObserver.onError( + Status.INTERNAL.withDescription(message).withCause(e).asRuntimeException()); + } + } + + private void execOperationsAsParticipant(String funcName, String transactionId, + TransactionFunction operations, + StreamObserver responseObserver) { + try { + // Join the transaction + TwoPhaseCommitTransaction transaction = twoPhaseCommitTransactionManager.join(transactionId); + + // Execute operations + T response = operations.apply(transaction); + + // Return the response + responseObserver.onNext(response); + responseObserver.onCompleted(); + } catch (StatusRuntimeException e) { + logger.error("{} failed", funcName, e); + responseObserver.onError(e); + } catch (Exception e) { + String message = funcName + " failed"; + logger.error(message, e); + responseObserver.onError( + Status.INTERNAL.withDescription(message).withCause(e).asRuntimeException()); + } + } + + @Override + public void close() { + transactionManager.close(); + twoPhaseCommitTransactionManager.close(); + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/src/main/java/sample/customer/CustomerServiceServer.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/src/main/java/sample/customer/CustomerServiceServer.java new file mode 100644 index 00000000..94967666 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/src/main/java/sample/customer/CustomerServiceServer.java @@ -0,0 +1,74 @@ +package sample.customer; + +import io.grpc.Server; +import io.grpc.ServerBuilder; +import java.util.concurrent.Callable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import picocli.CommandLine; +import picocli.CommandLine.Command; + +@Command(name = "customer-service-server", description = "Starts Customer Service server.") +public class CustomerServiceServer implements Callable { + private static final Logger logger = LoggerFactory.getLogger(CustomerServiceServer.class); + + private static final int PORT = 10010; + + @CommandLine.Option( + names = {"--config"}, + required = true, + paramLabel = "PROPERTIES_FILE", + description = "A configuration file in properties format.") + private String configFile; + + private CustomerService service; + private Server server; + + @Override + public Integer call() throws Exception { + addShutdownHook(); + start(); + blockUntilShutdown(); + return 0; + } + + public void start() throws Exception { + service = new CustomerService(configFile); + server = ServerBuilder.forPort(PORT).addService(service).build().start(); + logger.info("Customer Service server started, listening on " + PORT); + } + + public void addShutdownHook() { + Runtime.getRuntime() + .addShutdownHook( + new Thread( + () -> { + logger.info("Signal received. 
Shutting down the server ..."); + shutdown(); + blockUntilShutdown(); + service.close(); + logger.info("The server shut down"); + })); + } + + private void blockUntilShutdown() { + if (server != null) { + try { + server.awaitTermination(); + } catch (InterruptedException ignored) { + // don't need to handle InterruptedException + } + } + } + + private void shutdown() { + if (server != null) { + server.shutdown(); + } + } + + public static void main(String[] args) { + int exitCode = new CommandLine(new CustomerServiceServer()).execute(args); + System.exit(exitCode); + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/src/main/java/sample/customer/model/Customer.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/src/main/java/sample/customer/model/Customer.java new file mode 100644 index 00000000..e241f1a8 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/src/main/java/sample/customer/model/Customer.java @@ -0,0 +1,72 @@ +package sample.customer.model; + +import com.scalar.db.api.Get; +import com.scalar.db.api.Put; +import com.scalar.db.api.TransactionCrudOperable; +import com.scalar.db.exception.transaction.CrudException; +import com.scalar.db.io.Key; +import java.util.Optional; + +public class Customer { + private static final String NAMESPACE = "customer_service"; + private static final String TABLE = "customers"; + private static final String COL_CUSTOMER_ID = "customer_id"; + private static final String COL_NAME = "name"; + private static final String COL_CREDIT_LIMIT = "credit_limit"; + private static final String COL_CREDIT_TOTAL = "credit_total"; + + public final int id; + public final String name; + public final int creditLimit; + public final int creditTotal; + + public Customer(int id, String name, int creditLimit, int creditTotal) { + this.id = id; + this.name = name; + this.creditLimit = creditLimit; + this.creditTotal = creditTotal; + } + + public static void put( + TransactionCrudOperable transaction, int id, String name, int creditLimit, int creditTotal) + throws CrudException { + transaction.put( + Put.newBuilder() + .namespace(NAMESPACE) + .table(TABLE) + .partitionKey(Key.ofInt(COL_CUSTOMER_ID, id)) + .textValue(COL_NAME, name) + .intValue(COL_CREDIT_LIMIT, creditLimit) + .intValue(COL_CREDIT_TOTAL, creditTotal) + .build()); + } + + public static void updateCreditTotal(TransactionCrudOperable transaction, int id, int creditTotal) + throws CrudException { + transaction.put( + Put.newBuilder() + .namespace(NAMESPACE) + .table(TABLE) + .partitionKey(Key.ofInt(COL_CUSTOMER_ID, id)) + .intValue(COL_CREDIT_TOTAL, creditTotal) + .build()); + } + + public static Optional get(TransactionCrudOperable transaction, int id) + throws CrudException { + return transaction + .get( + Get.newBuilder() + .namespace(NAMESPACE) + .table(TABLE) + .partitionKey(Key.ofInt(COL_CUSTOMER_ID, id)) + .build()) + .map( + r -> + new Customer( + r.getInt(COL_CUSTOMER_ID), + r.getText(COL_NAME), + r.getInt(COL_CREDIT_LIMIT), + r.getInt(COL_CREDIT_TOTAL))); + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/src/main/resources/log4j2.properties b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/src/main/resources/log4j2.properties new file mode 100644 index 00000000..6210ad0f --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/customer-service/src/main/resources/log4j2.properties @@ -0,0 
+1,6 @@ +appender.console.type=Console +appender.console.name=STDOUT +appender.console.layout.type=PatternLayout +appender.console.layout.pattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n +rootLogger.level=info +rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/database-cassandra.properties b/docs/3.12/scalardb-samples/microservice-transaction-sample/database-cassandra.properties new file mode 100644 index 00000000..a44993ae --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/database-cassandra.properties @@ -0,0 +1,4 @@ +scalar.db.storage=cassandra +scalar.db.contact_points=localhost +scalar.db.username=cassandra +scalar.db.password=cassandra diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/database-mysql.properties b/docs/3.12/scalardb-samples/microservice-transaction-sample/database-mysql.properties new file mode 100644 index 00000000..d1389915 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/database-mysql.properties @@ -0,0 +1,4 @@ +scalar.db.storage=jdbc +scalar.db.contact_points=jdbc:mysql://localhost:3306/ +scalar.db.username=root +scalar.db.password=mysql diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/docker-compose.yml b/docs/3.12/scalardb-samples/microservice-transaction-sample/docker-compose.yml new file mode 100644 index 00000000..9705c966 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/docker-compose.yml @@ -0,0 +1,46 @@ +version: "3.5" +services: + mysql: + image: mysql:8.0 + environment: + MYSQL_ROOT_PASSWORD: mysql + container_name: "mysql-1" + ports: + - "3306:3306" + networks: + - sample-network + cassandra: + image: cassandra:3.11 + container_name: "cassandra-1" + ports: + - "9042:9042" + networks: + - sample-network + customer-service: + image: sample-customer-service:1.0 + container_name: "customer-service-1" + depends_on: + - mysql + - cassandra + entrypoint: /bin/bash + command: ./bin/customer-service --config database.properties + restart: "always" + ports: + - "10010:10010" + networks: + - sample-network + order-service: + image: sample-order-service:1.0 + container_name: "order-service-1" + depends_on: + - cassandra + entrypoint: /bin/bash + command: ./bin/order-service --config database.properties + restart: "always" + ports: + - "10020:10020" + networks: + - sample-network +networks: + sample-network: + name: sample-network diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/gradle/wrapper/gradle-wrapper.jar b/docs/3.12/scalardb-samples/microservice-transaction-sample/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 00000000..7454180f Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/gradle/wrapper/gradle-wrapper.jar differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/gradle/wrapper/gradle-wrapper.properties b/docs/3.12/scalardb-samples/microservice-transaction-sample/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000..070cb702 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-7.6-bin.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/gradlew 
b/docs/3.12/scalardb-samples/microservice-transaction-sample/gradlew new file mode 100755 index 00000000..744e882e --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/gradlew @@ -0,0 +1,185 @@ +#!/usr/bin/env sh + +# +# Copyright 2015 the original author or authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MSYS* | MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin or MSYS, switch paths to Windows format before running java +if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=`expr $i + 1` + done + case $i in + 0) set -- ;; + 1) set -- "$args0" ;; + 2) set -- "$args0" "$args1" ;; + 3) set -- "$args0" "$args1" "$args2" ;; + 4) set -- "$args0" "$args1" "$args2" "$args3" ;; + 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=`save "$@"` + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +exec "$JAVACMD" "$@" diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/gradlew.bat b/docs/3.12/scalardb-samples/microservice-transaction-sample/gradlew.bat new file mode 100644 index 00000000..107acd32 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/gradlew.bat @@ -0,0 +1,89 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/images/ERD.png b/docs/3.12/scalardb-samples/microservice-transaction-sample/images/ERD.png new file mode 100644 index 00000000..c0468efa Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/images/ERD.png differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/images/overview.png b/docs/3.12/scalardb-samples/microservice-transaction-sample/images/overview.png new file mode 100644 index 00000000..4340b4f5 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/images/overview.png differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/images/sequence_diagram.png b/docs/3.12/scalardb-samples/microservice-transaction-sample/images/sequence_diagram.png new file mode 100644 index 00000000..0317b5f3 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/images/sequence_diagram.png differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service-schema.json b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service-schema.json new file mode 100644 index 00000000..29c99907 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service-schema.json @@ -0,0 +1,44 @@ +{ + "order_service.orders": { + "transaction": true, + "partition-key": [ + "customer_id" + ], + "clustering-key": [ + "timestamp" + ], + "secondary-index": [ + "order_id" + ], + "columns": { + "order_id": "TEXT", + "customer_id": "INT", + "timestamp": "BIGINT" + } + }, + "order_service.statements": { + "transaction": true, + "partition-key": [ + "order_id" + ], + "clustering-key": [ + "item_id" + ], + "columns": { + "order_id": "TEXT", + "item_id": "INT", + "count": "INT" + } + }, + "order_service.items": { + "transaction": true, + "partition-key": [ + "item_id" + ], + "columns": { + "item_id": "INT", + "name": "TEXT", + "price": "INT" + } + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/Dockerfile b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/Dockerfile new file mode 100644 index 00000000..85461994 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/Dockerfile @@ -0,0 +1,14 @@ +FROM docker.io/busybox:1.32 AS tools +FROM openjdk:8u292-jre-slim + +WORKDIR / + +ADD order-service.tar . 
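+# ADD auto-extracts the local tarball produced by the Gradle distTar task (order-service.tar) into the image; the service is then started from the extracted distribution's bin/ directory via the ENTRYPOINT below.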
+ +WORKDIR /order-service + +COPY order-service.properties database.properties + +ENTRYPOINT ["./bin/sample-order-service", "--config", "database.properties"] + +EXPOSE 10020 diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/bin/main/sample/order/OrderService.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/bin/main/sample/order/OrderService.class new file mode 100644 index 00000000..8df17876 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/bin/main/sample/order/OrderService.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/bin/main/sample/order/OrderServiceServer.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/bin/main/sample/order/OrderServiceServer.class new file mode 100644 index 00000000..f9b233ad Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/bin/main/sample/order/OrderServiceServer.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/bin/main/sample/order/model/Item.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/bin/main/sample/order/model/Item.class new file mode 100644 index 00000000..05a18483 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/bin/main/sample/order/model/Item.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/bin/main/sample/order/model/Order.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/bin/main/sample/order/model/Order.class new file mode 100644 index 00000000..2e45d0a3 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/bin/main/sample/order/model/Order.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/bin/main/sample/order/model/Statement.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/bin/main/sample/order/model/Statement.class new file mode 100644 index 00000000..1a3f36c3 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/bin/main/sample/order/model/Statement.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/build.gradle b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/build.gradle new file mode 100644 index 00000000..6a9bbf3c --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/build.gradle @@ -0,0 +1,42 @@ +plugins { + id 'java' + id 'java-library-distribution' + id 'application' + id 'com.palantir.docker' version '0.25.0' +} + +dependencies { + implementation project(':rpc') + implementation "com.scalar-labs:scalardb:${scalarDbVersion}" + implementation "info.picocli:picocli:${picoCliVersion}" + implementation "org.apache.logging.log4j:log4j-api:${log4jVersion}" + implementation "org.apache.logging.log4j:log4j-core:${log4jVersion}" + implementation "org.apache.logging.log4j:log4j-slf4j-impl:${log4jVersion}" +} + +application { + mainClassName = 'sample.order.OrderServiceServer' +} + +docker { + name "sample-order-service:${project.version}" + files tasks.distTar.outputs, 'order-service.properties' +} + +distTar { + archiveFileName = "${project.name}.tar" + duplicatesStrategy 
DuplicatesStrategy.EXCLUDE +} + +distZip { + duplicatesStrategy DuplicatesStrategy.EXCLUDE +} + +installDist { + duplicatesStrategy DuplicatesStrategy.EXCLUDE +} + +archivesBaseName = "sample-order-service" + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/order-service.properties b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/order-service.properties new file mode 100644 index 00000000..5d17c33b --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/order-service.properties @@ -0,0 +1,5 @@ +# Cassandra for the transaction tables and the order service tables +scalar.db.contact_points=cassandra +scalar.db.username=cassandra +scalar.db.password=cassandra +scalar.db.storage=cassandra diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/java/sample/order/OrderService.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/java/sample/order/OrderService.java new file mode 100644 index 00000000..60fcd5e8 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/java/sample/order/OrderService.java @@ -0,0 +1,355 @@ +package sample.order; + +import com.scalar.db.api.DistributedTransaction; +import com.scalar.db.api.DistributedTransactionManager; +import com.scalar.db.api.TwoPhaseCommitTransaction; +import com.scalar.db.api.TwoPhaseCommitTransactionManager; +import com.scalar.db.exception.transaction.AbortException; +import com.scalar.db.exception.transaction.CrudException; +import com.scalar.db.exception.transaction.RollbackException; +import com.scalar.db.exception.transaction.TransactionException; +import com.scalar.db.service.TransactionFactory; +import io.grpc.ManagedChannel; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.netty.NettyChannelBuilder; +import io.grpc.stub.StreamObserver; +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import javax.annotation.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import sample.order.model.Item; +import sample.order.model.Order; +import sample.order.model.Statement; +import sample.rpc.CommitRequest; +import sample.rpc.CustomerServiceGrpc; +import sample.rpc.GetCustomerInfoRequest; +import sample.rpc.GetCustomerInfoResponse; +import sample.rpc.GetOrderRequest; +import sample.rpc.GetOrderResponse; +import sample.rpc.GetOrdersRequest; +import sample.rpc.GetOrdersResponse; +import sample.rpc.ItemOrder; +import sample.rpc.OrderServiceGrpc; +import sample.rpc.PaymentRequest; +import sample.rpc.PlaceOrderRequest; +import sample.rpc.PlaceOrderResponse; +import sample.rpc.PrepareRequest; +import sample.rpc.RollbackRequest; +import sample.rpc.ValidateRequest; + +public class OrderService extends OrderServiceGrpc.OrderServiceImplBase implements Closeable { + private static final Logger logger = LoggerFactory.getLogger(OrderService.class); + + // For normal transactions + private final DistributedTransactionManager transactionManager; + + // For two-phase commit transactions + private final TwoPhaseCommitTransactionManager twoPhaseCommitTransactionManager; + + // For gRPC connection to Customer service + private final ManagedChannel channel; + private final CustomerServiceGrpc.CustomerServiceBlockingStub 
customerServiceStub; + + private interface TransactionFunction { + R apply(T t) throws TransactionException; + } + + public OrderService(String configFile) throws TransactionException, IOException { + // Initialize the transaction managers + TransactionFactory factory = TransactionFactory.create(configFile); + transactionManager = factory.getTransactionManager(); + twoPhaseCommitTransactionManager = factory.getTwoPhaseCommitTransactionManager(); + + // Initialize the gRPC connection to Customer service + channel = NettyChannelBuilder.forAddress("customer-service", 10010).usePlaintext().build(); + customerServiceStub = CustomerServiceGrpc.newBlockingStub(channel); + + loadInitialData(); + } + + private void loadInitialData() throws TransactionException { + DistributedTransaction transaction = null; + try { + transaction = transactionManager.start(); + loadItemIfNotExists(transaction, 1, "Apple", 1000); + loadItemIfNotExists(transaction, 2, "Orange", 2000); + loadItemIfNotExists(transaction, 3, "Grape", 2500); + loadItemIfNotExists(transaction, 4, "Mango", 5000); + loadItemIfNotExists(transaction, 5, "Melon", 3000); + transaction.commit(); + } catch (TransactionException e) { + logger.error("Loading initial data failed", e); + abortTransaction(transaction); + throw e; + } + } + + private void loadItemIfNotExists( + DistributedTransaction transaction, int id, String name, int price) throws CrudException { + Optional item = Item.get(transaction, id); + if (!item.isPresent()) { + Item.put(transaction, id, name, price); + } + } + + /** Place an order. It's a transaction that spans OrderService and CustomerService */ + @Override + public void placeOrder( + PlaceOrderRequest request, StreamObserver responseObserver) { + execOperationsAsCoordinator("Placing an order", + transaction -> { + String orderId = UUID.randomUUID().toString(); + + // Put the order info into the orders table + Order.put(transaction, orderId, request.getCustomerId(), System.currentTimeMillis()); + + int amount = 0; + for (ItemOrder itemOrder : request.getItemOrderList()) { + // Put the order statement into the statements table + Statement.put(transaction, orderId, itemOrder.getItemId(), itemOrder.getCount()); + + // Retrieve the item info from the items table + Optional item = Item.get(transaction, itemOrder.getItemId()); + if (!item.isPresent()) { + throw Status.NOT_FOUND.withDescription("Item not found").asRuntimeException(); + } + + // Calculate the total amount + amount += item.get().price * itemOrder.getCount(); + } + + // Call the payment endpoint of Customer service + callPaymentEndpoint(transaction.getId(), request.getCustomerId(), amount); + + return PlaceOrderResponse.newBuilder().setOrderId(orderId).build(); + }, responseObserver + ); + } + + private void rollbackTransaction(@Nullable TwoPhaseCommitTransaction transaction) { + if (transaction == null) { + return; + } + + try { + transaction.rollback(); + } catch (RollbackException ex) { + logger.warn("Rollback failed", ex); + } + try { + callRollbackEndpoint(transaction.getId()); + } catch (StatusRuntimeException ex) { + logger.warn("Rollback failed", ex); + } + } + + private void callPaymentEndpoint(String transactionId, int customerId, int amount) { + customerServiceStub.payment( + PaymentRequest.newBuilder() + .setTransactionId(transactionId) + .setCustomerId(customerId) + .setAmount(amount) + .build()); + } + + private void callPrepareEndpoint(String transactionId) { + customerServiceStub.prepare( + 
PrepareRequest.newBuilder().setTransactionId(transactionId).build()); + } + + private void callValidateEndpoint(String transactionId) { + customerServiceStub.validate( + ValidateRequest.newBuilder().setTransactionId(transactionId).build()); + } + + private void callCommitEndpoint(String transactionId) { + customerServiceStub.commit(CommitRequest.newBuilder().setTransactionId(transactionId).build()); + } + + private void callRollbackEndpoint(String transactionId) { + customerServiceStub.rollback( + RollbackRequest.newBuilder().setTransactionId(transactionId).build()); + } + + /** Get Order information by order ID */ + @Override + public void getOrder(GetOrderRequest request, StreamObserver responseObserver) { + execOperationsAsCoordinator("Getting an order", + transaction -> { + // Retrieve the order info for the specified order ID + Optional order = Order.getById(transaction, request.getOrderId()); + if (!order.isPresent()) { + throw Status.NOT_FOUND.withDescription("Order not found").asRuntimeException(); + } + + // Get the customer name from the Customer service + String customerName = getCustomerName(transaction.getId(), order.get().customerId); + + // Make an order protobuf to return + sample.rpc.Order rpcOrder = getOrder(transaction, order.get(), customerName); + + return GetOrderResponse.newBuilder().setOrder(rpcOrder).build(); + }, responseObserver + ); + } + + /** Get Order information by customer ID */ + @Override + public void getOrders( + GetOrdersRequest request, StreamObserver responseObserver) { + execOperationsAsCoordinator("Getting orders", + transaction -> { + // Retrieve the order info for the specified customer ID + List orders = Order.getByCustomerId(transaction, request.getCustomerId()); + + // Get the customer name from the Customer service + String customerName = getCustomerName(transaction.getId(), request.getCustomerId()); + + GetOrdersResponse.Builder builder = GetOrdersResponse.newBuilder(); + for (Order order : orders) { + // Make an order protobuf to return + sample.rpc.Order rpcOrder = getOrder(transaction, order, customerName); + builder.addOrder(rpcOrder); + } + + return builder.build(); + }, responseObserver + ); + } + + private sample.rpc.Order getOrder(TwoPhaseCommitTransaction transaction, Order order, + String customerName) + throws CrudException { + sample.rpc.Order.Builder orderBuilder = + sample.rpc.Order.newBuilder() + .setOrderId(order.id) + .setCustomerId(order.customerId) + .setCustomerName(customerName) + .setTimestamp(order.timestamp); + + int total = 0; + + // Retrieve the order statements for the order ID from the statements table + List statements = Statement.getByOrderId(transaction, order.id); + + // Make statements + for (Statement statement : statements) { + sample.rpc.Statement.Builder statementBuilder = sample.rpc.Statement.newBuilder(); + statementBuilder.setItemId(statement.itemId); + + // Retrieve the item data from the items table + Optional item = Item.get(transaction, statement.itemId); + if (!item.isPresent()) { + throw Status.NOT_FOUND.withDescription("Item not found").asRuntimeException(); + } + statementBuilder.setItemName(item.get().name); + statementBuilder.setPrice(item.get().price); + statementBuilder.setCount(statement.count); + + int itemTotal = item.get().price * statement.count; + statementBuilder.setTotal(itemTotal); + + orderBuilder.addStatement(statementBuilder); + + total += itemTotal; + } + + return orderBuilder.setTotal(total).build(); + } + + private String getCustomerName(String transactionId, int customerId) 
{ + GetCustomerInfoResponse customerInfo = + customerServiceStub.getCustomerInfo( + GetCustomerInfoRequest.newBuilder() + .setTransactionId(transactionId) + .setCustomerId(customerId).build()); + return customerInfo.getName(); + } + + private void abortTransaction(@Nullable DistributedTransaction transaction) { + if (transaction == null) { + return; + } + try { + transaction.abort(); + } catch (AbortException e) { + logger.warn("Abort failed", e); + } + } + + private void execOperationsAsCoordinator(String funcName, + TransactionFunction operations, + StreamObserver responseObserver) { + TwoPhaseCommitTransaction transaction = null; + try { + // Start a two-phase commit interface transaction + transaction = twoPhaseCommitTransactionManager.start(); + + // Execute operations + T result = operations.apply(transaction); + + // Prepare the transaction + transaction.prepare(); + callPrepareEndpoint(transaction.getId()); + + // Validate the transaction. Depending on the concurrency control protocol, you need to call + // validate(). Currently, you need to call it when you use the Consensus Commit transaction + // manager and EXTRA_READ serializable strategy in SERIALIZABLE isolation level. In other + // cases, validate() does nothing. + transaction.validate(); + callValidateEndpoint(transaction.getId()); + + // Commit the transaction. If any of services succeed in committing the transaction, you can + // consider the transaction as committed. + boolean committed = false; + Exception exception = null; + try { + transaction.commit(); + committed = true; + } catch (TransactionException e) { + exception = e; + } + try { + callCommitEndpoint(transaction.getId()); + committed = true; + } catch (StatusRuntimeException e) { + exception = e; + } + if (!committed) { + throw exception; + } + + // Return the response + responseObserver.onNext(result); + responseObserver.onCompleted(); + } catch (StatusRuntimeException e) { + logger.error("{} failed", funcName, e); + rollbackTransaction(transaction); + responseObserver.onError(e); + } catch (Exception e) { + String message = funcName + " failed"; + logger.error(message, e); + rollbackTransaction(transaction); + responseObserver.onError( + Status.INTERNAL.withDescription(message).withCause(e).asRuntimeException()); + } + } + + @Override + public void close() { + try { + channel.shutdown().awaitTermination(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + logger.warn("Failed to shutdown the channel", e); + } + + transactionManager.close(); + twoPhaseCommitTransactionManager.close(); + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/java/sample/order/OrderServiceServer.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/java/sample/order/OrderServiceServer.java new file mode 100644 index 00000000..9d099d2d --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/java/sample/order/OrderServiceServer.java @@ -0,0 +1,74 @@ +package sample.order; + +import io.grpc.Server; +import io.grpc.ServerBuilder; +import java.util.concurrent.Callable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import picocli.CommandLine; +import picocli.CommandLine.Command; + +@Command(name = "order-service-server", description = "Starts Order Service server.") +public class OrderServiceServer implements Callable { + private static final Logger logger = LoggerFactory.getLogger(OrderServiceServer.class); + + private static final int 
PORT = 10020; + + @CommandLine.Option( + names = {"--config"}, + required = true, + paramLabel = "PROPERTIES_FILE", + description = "A configuration file in properties format.") + private String configFile; + + private OrderService service; + private Server server; + + @Override + public Integer call() throws Exception { + addShutdownHook(); + start(); + blockUntilShutdown(); + return 0; + } + + public void start() throws Exception { + service = new OrderService(configFile); + server = ServerBuilder.forPort(PORT).addService(service).build().start(); + logger.info("Order Service server started, listening on " + PORT); + } + + public void addShutdownHook() { + Runtime.getRuntime() + .addShutdownHook( + new Thread( + () -> { + logger.info("Signal received. Shutting down the server ..."); + shutdown(); + blockUntilShutdown(); + service.close(); + logger.info("The server shut down"); + })); + } + + private void blockUntilShutdown() { + if (server != null) { + try { + server.awaitTermination(); + } catch (InterruptedException ignored) { + // don't need to handle InterruptedException + } + } + } + + private void shutdown() { + if (server != null) { + server.shutdown(); + } + } + + public static void main(String[] args) { + int exitCode = new CommandLine(new OrderServiceServer()).execute(args); + System.exit(exitCode); + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/java/sample/order/model/Item.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/java/sample/order/model/Item.java new file mode 100644 index 00000000..9ea86692 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/java/sample/order/model/Item.java @@ -0,0 +1,51 @@ +package sample.order.model; + +import com.scalar.db.api.Get; +import com.scalar.db.api.Put; +import com.scalar.db.api.TransactionCrudOperable; +import com.scalar.db.exception.transaction.CrudException; +import com.scalar.db.io.Key; +import java.util.Optional; + +public class Item { + + private static final String NAMESPACE = "order_service"; + private static final String TABLE = "items"; + private static final String COL_ITEM_ID = "item_id"; + private static final String COL_NAME = "name"; + private static final String COL_PRICE = "price"; + + public final int id; + public final String name; + public final int price; + + public Item(int id, String name, int price) { + this.id = id; + this.name = name; + this.price = price; + } + + public static void put(TransactionCrudOperable transaction, int id, String name, int price) + throws CrudException { + transaction.put( + Put.newBuilder() + .namespace(NAMESPACE) + .table(TABLE) + .partitionKey(Key.ofInt(COL_ITEM_ID, id)) + .textValue(COL_NAME, name) + .intValue(COL_PRICE, price) + .build()); + } + + public static Optional get(TransactionCrudOperable transaction, int id) + throws CrudException { + return transaction + .get( + Get.newBuilder() + .namespace(NAMESPACE) + .table(TABLE) + .partitionKey(Key.ofInt(COL_ITEM_ID, id)) + .build()) + .map(r -> new Item(r.getInt(COL_ITEM_ID), r.getText(COL_NAME), r.getInt(COL_PRICE))); + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/java/sample/order/model/Order.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/java/sample/order/model/Order.java new file mode 100644 index 00000000..e0eb2d48 --- /dev/null +++ 
b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/java/sample/order/model/Order.java @@ -0,0 +1,79 @@ +package sample.order.model; + +import com.scalar.db.api.Get; +import com.scalar.db.api.Put; +import com.scalar.db.api.Result; +import com.scalar.db.api.Scan; +import com.scalar.db.api.Scan.Ordering; +import com.scalar.db.api.TransactionCrudOperable; +import com.scalar.db.exception.transaction.CrudException; +import com.scalar.db.io.Key; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + +public class Order { + + private static final String NAMESPACE = "order_service"; + private static final String TABLE = "orders"; + private static final String COL_ORDER_ID = "order_id"; + private static final String COL_CUSTOMER_ID = "customer_id"; + private static final String COL_TIMESTAMP = "timestamp"; + + public final String id; + public final int customerId; + public final long timestamp; + + public Order(String id, int customerId, long timestamp) { + this.id = id; + this.customerId = customerId; + this.timestamp = timestamp; + } + + public static void put( + TransactionCrudOperable transaction, String id, int customerId, long timestamp) + throws CrudException { + transaction.put( + Put.newBuilder() + .namespace(NAMESPACE) + .table(TABLE) + .partitionKey(Key.ofInt(COL_CUSTOMER_ID, customerId)) + .clusteringKey(Key.ofBigInt(COL_TIMESTAMP, timestamp)) + .textValue(COL_ORDER_ID, id) + .build()); + } + + public static Optional getById(TransactionCrudOperable transaction, String id) + throws CrudException { + return transaction + .get( + Get.newBuilder() + .namespace(NAMESPACE) + .table(TABLE) + .indexKey(Key.ofText(COL_ORDER_ID, id)) + .build()) + .map(Order::resultToOrder); + } + + public static List getByCustomerId(TransactionCrudOperable transaction, int customerId) + throws CrudException { + return transaction + .scan( + Scan.newBuilder() + .namespace(NAMESPACE) + .table(TABLE) + .partitionKey(Key.ofInt(COL_CUSTOMER_ID, customerId)) + .ordering(Ordering.desc(COL_TIMESTAMP)) + .build()) + .stream() + .map(Order::resultToOrder) + .collect(Collectors.toList()); + } + + private static Order resultToOrder(Result result) { + return new Order( + result.getText(COL_ORDER_ID), + result.getInt(COL_CUSTOMER_ID), + result.getBigInt(COL_TIMESTAMP)); + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/java/sample/order/model/Statement.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/java/sample/order/model/Statement.java new file mode 100644 index 00000000..5e8c8698 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/java/sample/order/model/Statement.java @@ -0,0 +1,55 @@ +package sample.order.model; + +import com.scalar.db.api.Put; +import com.scalar.db.api.Scan; +import com.scalar.db.api.TransactionCrudOperable; +import com.scalar.db.exception.transaction.CrudException; +import com.scalar.db.io.Key; +import java.util.List; +import java.util.stream.Collectors; + +public class Statement { + + private static final String NAMESPACE = "order_service"; + private static final String TABLE = "statements"; + private static final String COL_ORDER_ID = "order_id"; + private static final String COL_ITEM_ID = "item_id"; + private static final String COL_COUNT = "count"; + + public final String orderId; + public final int itemId; + public final int count; + + public Statement(String orderId, int itemId, int count) 
{ + this.orderId = orderId; + this.itemId = itemId; + this.count = count; + } + + public static void put(TransactionCrudOperable transaction, String orderId, int itemId, int count) + throws CrudException { + transaction.put( + Put.newBuilder() + .namespace(NAMESPACE) + .table(TABLE) + .partitionKey(Key.ofText(COL_ORDER_ID, orderId)) + .clusteringKey(Key.ofInt(COL_ITEM_ID, itemId)) + .intValue(COL_COUNT, count) + .build()); + } + + public static List getByOrderId(TransactionCrudOperable transaction, String orderId) + throws CrudException { + return transaction + .scan( + Scan.newBuilder() + .namespace(NAMESPACE) + .table(TABLE) + .partitionKey(Key.ofText(COL_ORDER_ID, orderId)) + .build()) + .stream() + .map( + r -> new Statement(r.getText(COL_ORDER_ID), r.getInt(COL_ITEM_ID), r.getInt(COL_COUNT))) + .collect(Collectors.toList()); + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/resources/log4j2.properties b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/resources/log4j2.properties new file mode 100644 index 00000000..6210ad0f --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/order-service/src/main/resources/log4j2.properties @@ -0,0 +1,6 @@ +appender.console.type=Console +appender.console.name=STDOUT +appender.console.layout.type=PatternLayout +appender.console.layout.pattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n +rootLogger.level=info +rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest$1.class new file mode 100644 index 00000000..d855f97d Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest$Builder.class new file mode 100644 index 00000000..7f1a4d70 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest.class new file mode 100644 index 00000000..e1dab0fa Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequestOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequestOrBuilder.class new file mode 100644 index 00000000..eb0a7908 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$1.class new file mode 100644 index 00000000..ce669993 Binary files /dev/null and 
b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$2.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$2.class new file mode 100644 index 00000000..760334ef Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$2.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$3.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$3.class new file mode 100644 index 00000000..0596ed8a Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$3.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceBaseDescriptorSupplier.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceBaseDescriptorSupplier.class new file mode 100644 index 00000000..bc7b337d Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceBaseDescriptorSupplier.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceBlockingStub.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceBlockingStub.class new file mode 100644 index 00000000..97dfcec5 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceBlockingStub.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceFileDescriptorSupplier.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceFileDescriptorSupplier.class new file mode 100644 index 00000000..0973f8f5 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceFileDescriptorSupplier.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceFutureStub.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceFutureStub.class new file mode 100644 index 00000000..dd7bd77a Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceFutureStub.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceImplBase.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceImplBase.class new file mode 100644 index 00000000..2a488fcb Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceImplBase.class 
differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceMethodDescriptorSupplier.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceMethodDescriptorSupplier.class new file mode 100644 index 00000000..d6f08036 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceMethodDescriptorSupplier.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceStub.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceStub.class new file mode 100644 index 00000000..4bd9bd22 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceStub.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$MethodHandlers.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$MethodHandlers.class new file mode 100644 index 00000000..9e8b09a9 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$MethodHandlers.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc.class new file mode 100644 index 00000000..e4bdc4dd Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest$1.class new file mode 100644 index 00000000..8877fdb1 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest$Builder.class new file mode 100644 index 00000000..d9e4d6d1 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest.class new file mode 100644 index 00000000..3c3cce48 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequestOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequestOrBuilder.class new file mode 100644 index 
00000000..f9298ab3 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse$1.class new file mode 100644 index 00000000..d72bdc86 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse$Builder.class new file mode 100644 index 00000000..4580edfb Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse.class new file mode 100644 index 00000000..8179e701 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponseOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponseOrBuilder.class new file mode 100644 index 00000000..a23bc85d Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponseOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest$1.class new file mode 100644 index 00000000..859905fa Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest$Builder.class new file mode 100644 index 00000000..2d1da8ef Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest.class new file mode 100644 index 00000000..77a76406 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequestOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequestOrBuilder.class new file mode 100644 index 00000000..6948966a Binary files /dev/null 
and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse$1.class new file mode 100644 index 00000000..6bb76939 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse$Builder.class new file mode 100644 index 00000000..9259e51a Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse.class new file mode 100644 index 00000000..c341a4e7 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponseOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponseOrBuilder.class new file mode 100644 index 00000000..1367fbcd Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponseOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest$1.class new file mode 100644 index 00000000..f94ef23d Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest$Builder.class new file mode 100644 index 00000000..f4531b1b Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest.class new file mode 100644 index 00000000..a60847f6 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequestOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequestOrBuilder.class new file mode 100644 index 00000000..771201fc Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequestOrBuilder.class 
differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse$1.class new file mode 100644 index 00000000..0327d415 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse$Builder.class new file mode 100644 index 00000000..1df0ac9d Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse.class new file mode 100644 index 00000000..a808d4e7 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponseOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponseOrBuilder.class new file mode 100644 index 00000000..a373f857 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponseOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder$1.class new file mode 100644 index 00000000..f9511e4f Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder$Builder.class new file mode 100644 index 00000000..b710ba05 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder.class new file mode 100644 index 00000000..bd758ac5 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrderOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrderOrBuilder.class new file mode 100644 index 00000000..6385b10f Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrderOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Order$1.class 
b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Order$1.class new file mode 100644 index 00000000..22c87fa0 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Order$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Order$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Order$Builder.class new file mode 100644 index 00000000..45c3d1f1 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Order$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Order.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Order.class new file mode 100644 index 00000000..4fde3a9d Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Order.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderOrBuilder.class new file mode 100644 index 00000000..7b00a6c7 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$1.class new file mode 100644 index 00000000..8f1311fd Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$2.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$2.class new file mode 100644 index 00000000..c71da053 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$2.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$3.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$3.class new file mode 100644 index 00000000..4b027e4a Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$3.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$MethodHandlers.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$MethodHandlers.class new file mode 100644 index 00000000..2473766c Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$MethodHandlers.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceBaseDescriptorSupplier.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceBaseDescriptorSupplier.class new file mode 100644 index 00000000..4cfb4c9a Binary files 
/dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceBaseDescriptorSupplier.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceBlockingStub.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceBlockingStub.class new file mode 100644 index 00000000..36703f05 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceBlockingStub.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceFileDescriptorSupplier.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceFileDescriptorSupplier.class new file mode 100644 index 00000000..040167ce Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceFileDescriptorSupplier.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceFutureStub.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceFutureStub.class new file mode 100644 index 00000000..6fa17db6 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceFutureStub.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceImplBase.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceImplBase.class new file mode 100644 index 00000000..3dfa4604 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceImplBase.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceMethodDescriptorSupplier.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceMethodDescriptorSupplier.class new file mode 100644 index 00000000..63886a8d Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceMethodDescriptorSupplier.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceStub.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceStub.class new file mode 100644 index 00000000..39e33807 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceStub.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc.class new file mode 100644 index 00000000..68229915 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc.class differ diff --git 
a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest$1.class new file mode 100644 index 00000000..9f549c43 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest$Builder.class new file mode 100644 index 00000000..4ea3b665 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest.class new file mode 100644 index 00000000..3b487d42 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequestOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequestOrBuilder.class new file mode 100644 index 00000000..62c247c8 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest$1.class new file mode 100644 index 00000000..f5583db8 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest$Builder.class new file mode 100644 index 00000000..380db594 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest.class new file mode 100644 index 00000000..c14d1f99 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequestOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequestOrBuilder.class new file mode 100644 index 00000000..1c4210eb Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse$1.class 
b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse$1.class new file mode 100644 index 00000000..589c8d2a Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse$Builder.class new file mode 100644 index 00000000..c044a174 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse.class new file mode 100644 index 00000000..dfae371e Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponseOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponseOrBuilder.class new file mode 100644 index 00000000..3c4df9de Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponseOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest$1.class new file mode 100644 index 00000000..7b13187c Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest$Builder.class new file mode 100644 index 00000000..75169c2e Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest.class new file mode 100644 index 00000000..404870b0 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequestOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequestOrBuilder.class new file mode 100644 index 00000000..bc8e43ed Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest$1.class new file 
mode 100644 index 00000000..8785bb15 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest$Builder.class new file mode 100644 index 00000000..d655e2dd Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest.class new file mode 100644 index 00000000..745aa208 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequestOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequestOrBuilder.class new file mode 100644 index 00000000..9ec59308 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest$1.class new file mode 100644 index 00000000..13845027 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest$Builder.class new file mode 100644 index 00000000..a4aa7c5c Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest.class new file mode 100644 index 00000000..3e89c92b Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequestOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequestOrBuilder.class new file mode 100644 index 00000000..7e095b3e Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Sample.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Sample.class new file mode 100644 index 00000000..d540e237 Binary files /dev/null and 
b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Sample.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement$1.class new file mode 100644 index 00000000..afb8a608 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement$Builder.class new file mode 100644 index 00000000..ac03388a Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement.class new file mode 100644 index 00000000..6ae7ab37 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/StatementOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/StatementOrBuilder.class new file mode 100644 index 00000000..d90d8041 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/StatementOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest$1.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest$1.class new file mode 100644 index 00000000..3d56dd89 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest$Builder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest$Builder.class new file mode 100644 index 00000000..18fbfa59 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest.class new file mode 100644 index 00000000..8f51bc8f Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequestOrBuilder.class b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequestOrBuilder.class new file mode 100644 index 00000000..87e5cac3 Binary files /dev/null and b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/build.gradle 
b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/build.gradle new file mode 100644 index 00000000..c8f3a3d8 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/build.gradle @@ -0,0 +1,35 @@ +plugins { + id 'java' + id 'java-library-distribution' + id 'com.google.protobuf' version '0.9.1' +} + +dependencies { + api "io.grpc:grpc-netty:${grpcVersion}" + api "io.grpc:grpc-protobuf:${grpcVersion}" + api "io.grpc:grpc-stub:${grpcVersion}" + api "io.grpc:grpc-services:${grpcVersion}" + implementation "javax.annotation:javax.annotation-api:1.3.2" +} + +protobuf { + protoc { artifact = "com.google.protobuf:protoc:${protocVersion}" } + plugins { + grpc { artifact = "io.grpc:protoc-gen-grpc-java:${grpcVersion}" } + } + generateProtoTasks { + all()*.plugins { grpc {} } + } + generatedFilesBaseDir = "$projectDir/src" +} + +archivesBaseName = "sample-rpc" + +// The processResources task needs to depend on the generateProto task because it uses the output +// of the generateProto task +processResources { + dependsOn generateProto +} + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/grpc/sample/rpc/CustomerServiceGrpc.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/grpc/sample/rpc/CustomerServiceGrpc.java new file mode 100644 index 00000000..fe124be8 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/grpc/sample/rpc/CustomerServiceGrpc.java @@ -0,0 +1,819 @@ +package sample.rpc; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + *
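For orientation before the generated listing that follows: clients consume this protoc/gRPC-generated class through the stub factories it exposes (newStub, newBlockingStub, newFutureStub), and the protobuf plugin configured above emits these sources under src/main/grpc. A minimal sketch of a blocking call is shown here; it is illustrative only and not part of the sample's diff, the class name and the localhost:10010 endpoint are placeholder assumptions, and the request is left at its default instance rather than assuming field names.

// Illustrative sketch (not from the sample itself): synchronous call through the generated blocking stub.
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import sample.rpc.CustomerServiceGrpc;
import sample.rpc.GetCustomerInfoRequest;
import sample.rpc.GetCustomerInfoResponse;

public class CustomerServiceClientSketch {
  public static void main(String[] args) {
    // Assumed endpoint; the real sample defines its own service addresses.
    ManagedChannel channel =
        ManagedChannelBuilder.forAddress("localhost", 10010).usePlaintext().build();
    try {
      CustomerServiceGrpc.CustomerServiceBlockingStub stub =
          CustomerServiceGrpc.newBlockingStub(channel);
      // A real caller would populate the customer ID on the request builder; the default
      // instance is used here to avoid assuming field names.
      GetCustomerInfoResponse response =
          stub.getCustomerInfo(GetCustomerInfoRequest.getDefaultInstance());
      System.out.println(response);
    } finally {
      channel.shutdownNow();
    }
  }
}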
+ * for Customer Service
+ * 
+ */ +@javax.annotation.Generated( + value = "by gRPC proto compiler (version 1.53.0)", + comments = "Source: sample.proto") +@io.grpc.stub.annotations.GrpcGenerated +public final class CustomerServiceGrpc { + + private CustomerServiceGrpc() {} + + public static final String SERVICE_NAME = "rpc.CustomerService"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor getGetCustomerInfoMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetCustomerInfo", + requestType = sample.rpc.GetCustomerInfoRequest.class, + responseType = sample.rpc.GetCustomerInfoResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getGetCustomerInfoMethod() { + io.grpc.MethodDescriptor getGetCustomerInfoMethod; + if ((getGetCustomerInfoMethod = CustomerServiceGrpc.getGetCustomerInfoMethod) == null) { + synchronized (CustomerServiceGrpc.class) { + if ((getGetCustomerInfoMethod = CustomerServiceGrpc.getGetCustomerInfoMethod) == null) { + CustomerServiceGrpc.getGetCustomerInfoMethod = getGetCustomerInfoMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetCustomerInfo")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.GetCustomerInfoRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.GetCustomerInfoResponse.getDefaultInstance())) + .setSchemaDescriptor(new CustomerServiceMethodDescriptorSupplier("GetCustomerInfo")) + .build(); + } + } + } + return getGetCustomerInfoMethod; + } + + private static volatile io.grpc.MethodDescriptor getRepaymentMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "Repayment", + requestType = sample.rpc.RepaymentRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getRepaymentMethod() { + io.grpc.MethodDescriptor getRepaymentMethod; + if ((getRepaymentMethod = CustomerServiceGrpc.getRepaymentMethod) == null) { + synchronized (CustomerServiceGrpc.class) { + if ((getRepaymentMethod = CustomerServiceGrpc.getRepaymentMethod) == null) { + CustomerServiceGrpc.getRepaymentMethod = getRepaymentMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Repayment")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.RepaymentRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new CustomerServiceMethodDescriptorSupplier("Repayment")) + .build(); + } + } + } + return getRepaymentMethod; + } + + private static volatile io.grpc.MethodDescriptor getPaymentMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "Payment", + requestType = sample.rpc.PaymentRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getPaymentMethod() { + io.grpc.MethodDescriptor getPaymentMethod; + if ((getPaymentMethod = CustomerServiceGrpc.getPaymentMethod) == null) { + synchronized 
(CustomerServiceGrpc.class) { + if ((getPaymentMethod = CustomerServiceGrpc.getPaymentMethod) == null) { + CustomerServiceGrpc.getPaymentMethod = getPaymentMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Payment")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.PaymentRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new CustomerServiceMethodDescriptorSupplier("Payment")) + .build(); + } + } + } + return getPaymentMethod; + } + + private static volatile io.grpc.MethodDescriptor getPrepareMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "Prepare", + requestType = sample.rpc.PrepareRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getPrepareMethod() { + io.grpc.MethodDescriptor getPrepareMethod; + if ((getPrepareMethod = CustomerServiceGrpc.getPrepareMethod) == null) { + synchronized (CustomerServiceGrpc.class) { + if ((getPrepareMethod = CustomerServiceGrpc.getPrepareMethod) == null) { + CustomerServiceGrpc.getPrepareMethod = getPrepareMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Prepare")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.PrepareRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new CustomerServiceMethodDescriptorSupplier("Prepare")) + .build(); + } + } + } + return getPrepareMethod; + } + + private static volatile io.grpc.MethodDescriptor getValidateMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "Validate", + requestType = sample.rpc.ValidateRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getValidateMethod() { + io.grpc.MethodDescriptor getValidateMethod; + if ((getValidateMethod = CustomerServiceGrpc.getValidateMethod) == null) { + synchronized (CustomerServiceGrpc.class) { + if ((getValidateMethod = CustomerServiceGrpc.getValidateMethod) == null) { + CustomerServiceGrpc.getValidateMethod = getValidateMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Validate")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.ValidateRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new CustomerServiceMethodDescriptorSupplier("Validate")) + .build(); + } + } + } + return getValidateMethod; + } + + private static volatile io.grpc.MethodDescriptor getCommitMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "Commit", + requestType = sample.rpc.CommitRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + 
public static io.grpc.MethodDescriptor getCommitMethod() { + io.grpc.MethodDescriptor getCommitMethod; + if ((getCommitMethod = CustomerServiceGrpc.getCommitMethod) == null) { + synchronized (CustomerServiceGrpc.class) { + if ((getCommitMethod = CustomerServiceGrpc.getCommitMethod) == null) { + CustomerServiceGrpc.getCommitMethod = getCommitMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Commit")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.CommitRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new CustomerServiceMethodDescriptorSupplier("Commit")) + .build(); + } + } + } + return getCommitMethod; + } + + private static volatile io.grpc.MethodDescriptor getRollbackMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "Rollback", + requestType = sample.rpc.RollbackRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getRollbackMethod() { + io.grpc.MethodDescriptor getRollbackMethod; + if ((getRollbackMethod = CustomerServiceGrpc.getRollbackMethod) == null) { + synchronized (CustomerServiceGrpc.class) { + if ((getRollbackMethod = CustomerServiceGrpc.getRollbackMethod) == null) { + CustomerServiceGrpc.getRollbackMethod = getRollbackMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Rollback")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.RollbackRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new CustomerServiceMethodDescriptorSupplier("Rollback")) + .build(); + } + } + } + return getRollbackMethod; + } + + /** + * Creates a new async stub that supports all call types for the service + */ + public static CustomerServiceStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public CustomerServiceStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new CustomerServiceStub(channel, callOptions); + } + }; + return CustomerServiceStub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static CustomerServiceBlockingStub newBlockingStub( + io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public CustomerServiceBlockingStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new CustomerServiceBlockingStub(channel, callOptions); + } + }; + return CustomerServiceBlockingStub.newStub(factory, channel); + } + + /** + * Creates a new ListenableFuture-style stub that supports unary calls on the service + */ + public static CustomerServiceFutureStub newFutureStub( + io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public 
CustomerServiceFutureStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new CustomerServiceFutureStub(channel, callOptions); + } + }; + return CustomerServiceFutureStub.newStub(factory, channel); + } + + /** + *
+   * for Customer Service
+   * 
+ */ + public static abstract class CustomerServiceImplBase implements io.grpc.BindableService { + + /** + *
+     * Get customer information
+     * 
+ */ + public void getCustomerInfo(sample.rpc.GetCustomerInfoRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getGetCustomerInfoMethod(), responseObserver); + } + + /** + *
+     * Credit card repayment
+     * 
+ */ + public void repayment(sample.rpc.RepaymentRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getRepaymentMethod(), responseObserver); + } + + /** + *
+     * Credit card payment
+     * 
+ */ + public void payment(sample.rpc.PaymentRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getPaymentMethod(), responseObserver); + } + + /** + *
+     * Prepare the transaction
+     * 
+ */ + public void prepare(sample.rpc.PrepareRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getPrepareMethod(), responseObserver); + } + + /** + *
+     * Validate the transaction
+     * 
+ */ + public void validate(sample.rpc.ValidateRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getValidateMethod(), responseObserver); + } + + /** + *
+     * Commit the transaction
+     * 
+ */ + public void commit(sample.rpc.CommitRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getCommitMethod(), responseObserver); + } + + /** + *
+     * Rollback the transaction
+     * 
+ */ + public void rollback(sample.rpc.RollbackRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getRollbackMethod(), responseObserver); + } + + @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getGetCustomerInfoMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.GetCustomerInfoRequest, + sample.rpc.GetCustomerInfoResponse>( + this, METHODID_GET_CUSTOMER_INFO))) + .addMethod( + getRepaymentMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.RepaymentRequest, + com.google.protobuf.Empty>( + this, METHODID_REPAYMENT))) + .addMethod( + getPaymentMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.PaymentRequest, + com.google.protobuf.Empty>( + this, METHODID_PAYMENT))) + .addMethod( + getPrepareMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.PrepareRequest, + com.google.protobuf.Empty>( + this, METHODID_PREPARE))) + .addMethod( + getValidateMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.ValidateRequest, + com.google.protobuf.Empty>( + this, METHODID_VALIDATE))) + .addMethod( + getCommitMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.CommitRequest, + com.google.protobuf.Empty>( + this, METHODID_COMMIT))) + .addMethod( + getRollbackMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.RollbackRequest, + com.google.protobuf.Empty>( + this, METHODID_ROLLBACK))) + .build(); + } + } + + /** + *
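The CustomerServiceImplBase class listed above is the server-side entry point: a service implementation extends it, overrides the RPC methods, and is registered with a gRPC server. A minimal sketch under stated assumptions (placeholder class name and port, and a default response instance instead of the sample's real database lookup) might look like this; methods that are not overridden fall back to the asyncUnimplementedUnaryCall behavior shown in the base class.

// Illustrative sketch (not from the sample itself): overriding one method of the generated base class
// and binding it to a gRPC server.
import io.grpc.Server;
import io.grpc.ServerBuilder;
import io.grpc.stub.StreamObserver;
import sample.rpc.CustomerServiceGrpc;
import sample.rpc.GetCustomerInfoRequest;
import sample.rpc.GetCustomerInfoResponse;

public class CustomerServiceServerSketch {
  static class CustomerServiceImpl extends CustomerServiceGrpc.CustomerServiceImplBase {
    @Override
    public void getCustomerInfo(GetCustomerInfoRequest request,
        StreamObserver<GetCustomerInfoResponse> responseObserver) {
      // A real implementation would look up the customer record; a default instance
      // keeps the sketch self-contained.
      responseObserver.onNext(GetCustomerInfoResponse.getDefaultInstance());
      responseObserver.onCompleted();
    }
  }

  public static void main(String[] args) throws Exception {
    Server server = ServerBuilder.forPort(10010) // assumed port
        .addService(new CustomerServiceImpl())
        .build()
        .start();
    server.awaitTermination();
  }
}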
+   * for Customer Service
+   * 
+ */ + public static final class CustomerServiceStub extends io.grpc.stub.AbstractAsyncStub { + private CustomerServiceStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected CustomerServiceStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new CustomerServiceStub(channel, callOptions); + } + + /** + *
+     * Get customer information
+     * 
+ */ + public void getCustomerInfo(sample.rpc.GetCustomerInfoRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetCustomerInfoMethod(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Credit card repayment
+     * 
+ */ + public void repayment(sample.rpc.RepaymentRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getRepaymentMethod(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Credit card payment
+     * 
+ */ + public void payment(sample.rpc.PaymentRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getPaymentMethod(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Prepare the transaction
+     * 
+ */ + public void prepare(sample.rpc.PrepareRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getPrepareMethod(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Validate the transaction
+     * 
+ */ + public void validate(sample.rpc.ValidateRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getValidateMethod(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Commit the transaction
+     * 
+ */ + public void commit(sample.rpc.CommitRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCommitMethod(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Rollback the transaction
+     * 
+ */ + public void rollback(sample.rpc.RollbackRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getRollbackMethod(), getCallOptions()), request, responseObserver); + } + } + + /** + *
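The asynchronous CustomerServiceStub above delivers results through a client-supplied StreamObserver instead of blocking the caller. A sketch of such a call follows, again with a placeholder endpoint and class name and the default request instance.

// Illustrative sketch (not from the sample itself): non-blocking call through the generated async stub.
import java.util.concurrent.TimeUnit;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.stub.StreamObserver;
import sample.rpc.CustomerServiceGrpc;
import sample.rpc.GetCustomerInfoRequest;
import sample.rpc.GetCustomerInfoResponse;

public class CustomerServiceAsyncClientSketch {
  public static void main(String[] args) throws InterruptedException {
    ManagedChannel channel =
        ManagedChannelBuilder.forAddress("localhost", 10010).usePlaintext().build(); // assumed endpoint

    CustomerServiceGrpc.newStub(channel).getCustomerInfo(
        GetCustomerInfoRequest.getDefaultInstance(),
        new StreamObserver<GetCustomerInfoResponse>() {
          @Override public void onNext(GetCustomerInfoResponse response) {
            System.out.println(response); // invoked once for a unary RPC
          }
          @Override public void onError(Throwable t) {
            t.printStackTrace();
          }
          @Override public void onCompleted() {
            channel.shutdown();
          }
        });

    channel.awaitTermination(5, TimeUnit.SECONDS); // give the async call time to complete
  }
}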
+   * for Customer Service
+   * 
+ */ + public static final class CustomerServiceBlockingStub extends io.grpc.stub.AbstractBlockingStub { + private CustomerServiceBlockingStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected CustomerServiceBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new CustomerServiceBlockingStub(channel, callOptions); + } + + /** + *
+     * Get customer information
+     * 
+ */ + public sample.rpc.GetCustomerInfoResponse getCustomerInfo(sample.rpc.GetCustomerInfoRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetCustomerInfoMethod(), getCallOptions(), request); + } + + /** + *
+     * Credit card repayment
+     * 
+ */ + public com.google.protobuf.Empty repayment(sample.rpc.RepaymentRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getRepaymentMethod(), getCallOptions(), request); + } + + /** + *
+     * Credit card payment
+     * 
+ */ + public com.google.protobuf.Empty payment(sample.rpc.PaymentRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getPaymentMethod(), getCallOptions(), request); + } + + /** + *
+     * Prepare the transaction
+     * 
+ */ + public com.google.protobuf.Empty prepare(sample.rpc.PrepareRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getPrepareMethod(), getCallOptions(), request); + } + + /** + *
+     * Validate the transaction
+     * 
+ */ + public com.google.protobuf.Empty validate(sample.rpc.ValidateRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getValidateMethod(), getCallOptions(), request); + } + + /** + *
+     * Commit the transaction
+     * 
+ */ + public com.google.protobuf.Empty commit(sample.rpc.CommitRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCommitMethod(), getCallOptions(), request); + } + + /** + *
+     * Rollback the transaction
+     * 
+ */ + public com.google.protobuf.Empty rollback(sample.rpc.RollbackRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getRollbackMethod(), getCallOptions(), request); + } + } + + /** + *
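The blocking stub above wraps each call in blockingUnaryCall; the ListenableFuture-style stub defined next returns a future instead, which suits callers that fan out several RPCs and join on the results. A usage sketch under the same assumptions (placeholder endpoint and class name; Guava is available as a gRPC dependency) is shown here.

// Illustrative sketch (not from the sample itself): future-style call with a Guava callback.
import java.util.concurrent.TimeUnit;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import sample.rpc.CustomerServiceGrpc;
import sample.rpc.GetCustomerInfoRequest;
import sample.rpc.GetCustomerInfoResponse;

public class CustomerServiceFutureClientSketch {
  public static void main(String[] args) throws InterruptedException {
    ManagedChannel channel =
        ManagedChannelBuilder.forAddress("localhost", 10010).usePlaintext().build(); // assumed endpoint

    ListenableFuture<GetCustomerInfoResponse> future =
        CustomerServiceGrpc.newFutureStub(channel)
            .getCustomerInfo(GetCustomerInfoRequest.getDefaultInstance());

    Futures.addCallback(future, new FutureCallback<GetCustomerInfoResponse>() {
      @Override public void onSuccess(GetCustomerInfoResponse response) {
        System.out.println(response);
        channel.shutdown();
      }
      @Override public void onFailure(Throwable t) {
        t.printStackTrace();
        channel.shutdown();
      }
    }, MoreExecutors.directExecutor());

    channel.awaitTermination(5, TimeUnit.SECONDS); // allow the callback to run before the JVM exits
  }
}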
+   * for Customer Service
+   * 
+ */ + public static final class CustomerServiceFutureStub extends io.grpc.stub.AbstractFutureStub { + private CustomerServiceFutureStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected CustomerServiceFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new CustomerServiceFutureStub(channel, callOptions); + } + + /** + *
+     * Get customer information
+     * 
+     */
+    public com.google.common.util.concurrent.ListenableFuture<sample.rpc.GetCustomerInfoResponse> getCustomerInfo(
+        sample.rpc.GetCustomerInfoRequest request) {
+      return io.grpc.stub.ClientCalls.futureUnaryCall(
+          getChannel().newCall(getGetCustomerInfoMethod(), getCallOptions()), request);
+    }
+
+    /**
+     * <pre>
+     * Credit card repayment
+     * </pre>
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.protobuf.Empty> repayment(
+        sample.rpc.RepaymentRequest request) {
+      return io.grpc.stub.ClientCalls.futureUnaryCall(
+          getChannel().newCall(getRepaymentMethod(), getCallOptions()), request);
+    }
+
+    /**
+     * <pre>
+     * Credit card payment
+     * </pre>
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.protobuf.Empty> payment(
+        sample.rpc.PaymentRequest request) {
+      return io.grpc.stub.ClientCalls.futureUnaryCall(
+          getChannel().newCall(getPaymentMethod(), getCallOptions()), request);
+    }
+
+    /**
+     * <pre>
+     * Prepare the transaction
+     * </pre>
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.protobuf.Empty> prepare(
+        sample.rpc.PrepareRequest request) {
+      return io.grpc.stub.ClientCalls.futureUnaryCall(
+          getChannel().newCall(getPrepareMethod(), getCallOptions()), request);
+    }
+
+    /**
+     * <pre>
+     * Validate the transaction
+     * </pre>
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.protobuf.Empty> validate(
+        sample.rpc.ValidateRequest request) {
+      return io.grpc.stub.ClientCalls.futureUnaryCall(
+          getChannel().newCall(getValidateMethod(), getCallOptions()), request);
+    }
+
+    /**
+     * <pre>
+     * Commit the transaction
+     * </pre>
+     */
+    public com.google.common.util.concurrent.ListenableFuture<com.google.protobuf.Empty> commit(
+        sample.rpc.CommitRequest request) {
+      return io.grpc.stub.ClientCalls.futureUnaryCall(
+          getChannel().newCall(getCommitMethod(), getCallOptions()), request);
+    }
+
+    /**
+     * <pre>
+     * Rollback the transaction
+     * </pre>
+ */ + public com.google.common.util.concurrent.ListenableFuture rollback( + sample.rpc.RollbackRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getRollbackMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_GET_CUSTOMER_INFO = 0; + private static final int METHODID_REPAYMENT = 1; + private static final int METHODID_PAYMENT = 2; + private static final int METHODID_PREPARE = 3; + private static final int METHODID_VALIDATE = 4; + private static final int METHODID_COMMIT = 5; + private static final int METHODID_ROLLBACK = 6; + + private static final class MethodHandlers implements + io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final CustomerServiceImplBase serviceImpl; + private final int methodId; + + MethodHandlers(CustomerServiceImplBase serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_GET_CUSTOMER_INFO: + serviceImpl.getCustomerInfo((sample.rpc.GetCustomerInfoRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_REPAYMENT: + serviceImpl.repayment((sample.rpc.RepaymentRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_PAYMENT: + serviceImpl.payment((sample.rpc.PaymentRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_PREPARE: + serviceImpl.prepare((sample.rpc.PrepareRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_VALIDATE: + serviceImpl.validate((sample.rpc.ValidateRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_COMMIT: + serviceImpl.commit((sample.rpc.CommitRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_ROLLBACK: + serviceImpl.rollback((sample.rpc.RollbackRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + } + + private static abstract class CustomerServiceBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, io.grpc.protobuf.ProtoServiceDescriptorSupplier { + CustomerServiceBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return sample.rpc.Sample.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("CustomerService"); + } + } + + private static final class CustomerServiceFileDescriptorSupplier + extends CustomerServiceBaseDescriptorSupplier { + CustomerServiceFileDescriptorSupplier() {} + } + + private static final class CustomerServiceMethodDescriptorSupplier + extends CustomerServiceBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final String methodName; + + 
CustomerServiceMethodDescriptorSupplier(String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (CustomerServiceGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new CustomerServiceFileDescriptorSupplier()) + .addMethod(getGetCustomerInfoMethod()) + .addMethod(getRepaymentMethod()) + .addMethod(getPaymentMethod()) + .addMethod(getPrepareMethod()) + .addMethod(getValidateMethod()) + .addMethod(getCommitMethod()) + .addMethod(getRollbackMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/grpc/sample/rpc/OrderServiceGrpc.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/grpc/sample/rpc/OrderServiceGrpc.java new file mode 100644 index 00000000..30f68eda --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/grpc/sample/rpc/OrderServiceGrpc.java @@ -0,0 +1,475 @@ +package sample.rpc; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + *
+ * for Order Service
+ * 
+ */ +@javax.annotation.Generated( + value = "by gRPC proto compiler (version 1.53.0)", + comments = "Source: sample.proto") +@io.grpc.stub.annotations.GrpcGenerated +public final class OrderServiceGrpc { + + private OrderServiceGrpc() {} + + public static final String SERVICE_NAME = "rpc.OrderService"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor getPlaceOrderMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "PlaceOrder", + requestType = sample.rpc.PlaceOrderRequest.class, + responseType = sample.rpc.PlaceOrderResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getPlaceOrderMethod() { + io.grpc.MethodDescriptor getPlaceOrderMethod; + if ((getPlaceOrderMethod = OrderServiceGrpc.getPlaceOrderMethod) == null) { + synchronized (OrderServiceGrpc.class) { + if ((getPlaceOrderMethod = OrderServiceGrpc.getPlaceOrderMethod) == null) { + OrderServiceGrpc.getPlaceOrderMethod = getPlaceOrderMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "PlaceOrder")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.PlaceOrderRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.PlaceOrderResponse.getDefaultInstance())) + .setSchemaDescriptor(new OrderServiceMethodDescriptorSupplier("PlaceOrder")) + .build(); + } + } + } + return getPlaceOrderMethod; + } + + private static volatile io.grpc.MethodDescriptor getGetOrderMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetOrder", + requestType = sample.rpc.GetOrderRequest.class, + responseType = sample.rpc.GetOrderResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getGetOrderMethod() { + io.grpc.MethodDescriptor getGetOrderMethod; + if ((getGetOrderMethod = OrderServiceGrpc.getGetOrderMethod) == null) { + synchronized (OrderServiceGrpc.class) { + if ((getGetOrderMethod = OrderServiceGrpc.getGetOrderMethod) == null) { + OrderServiceGrpc.getGetOrderMethod = getGetOrderMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetOrder")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.GetOrderRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.GetOrderResponse.getDefaultInstance())) + .setSchemaDescriptor(new OrderServiceMethodDescriptorSupplier("GetOrder")) + .build(); + } + } + } + return getGetOrderMethod; + } + + private static volatile io.grpc.MethodDescriptor getGetOrdersMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetOrders", + requestType = sample.rpc.GetOrdersRequest.class, + responseType = sample.rpc.GetOrdersResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getGetOrdersMethod() { + io.grpc.MethodDescriptor getGetOrdersMethod; + if ((getGetOrdersMethod = OrderServiceGrpc.getGetOrdersMethod) == null) { + synchronized (OrderServiceGrpc.class) { + if ((getGetOrdersMethod = OrderServiceGrpc.getGetOrdersMethod) == null) { + 
OrderServiceGrpc.getGetOrdersMethod = getGetOrdersMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetOrders")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.GetOrdersRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.GetOrdersResponse.getDefaultInstance())) + .setSchemaDescriptor(new OrderServiceMethodDescriptorSupplier("GetOrders")) + .build(); + } + } + } + return getGetOrdersMethod; + } + + /** + * Creates a new async stub that supports all call types for the service + */ + public static OrderServiceStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public OrderServiceStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new OrderServiceStub(channel, callOptions); + } + }; + return OrderServiceStub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static OrderServiceBlockingStub newBlockingStub( + io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public OrderServiceBlockingStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new OrderServiceBlockingStub(channel, callOptions); + } + }; + return OrderServiceBlockingStub.newStub(factory, channel); + } + + /** + * Creates a new ListenableFuture-style stub that supports unary calls on the service + */ + public static OrderServiceFutureStub newFutureStub( + io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public OrderServiceFutureStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new OrderServiceFutureStub(channel, callOptions); + } + }; + return OrderServiceFutureStub.newStub(factory, channel); + } + + /** + *
+   * for Order Service
+   * 
+   */
+  public static abstract class OrderServiceImplBase implements io.grpc.BindableService {
+
+    /**
+     * <pre>
+     * Place an order. It's a transaction that spans OrderService and CustomerService
+     * </pre>
+     */
+    public void placeOrder(sample.rpc.PlaceOrderRequest request,
+        io.grpc.stub.StreamObserver<sample.rpc.PlaceOrderResponse> responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getPlaceOrderMethod(), responseObserver);
+    }
+
+    /**
+     * <pre>
+     * Get Order information by order ID
+     * </pre>
+     */
+    public void getOrder(sample.rpc.GetOrderRequest request,
+        io.grpc.stub.StreamObserver<sample.rpc.GetOrderResponse> responseObserver) {
+      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getGetOrderMethod(), responseObserver);
+    }
+
+    /**
+     * <pre>
+     * Get Order information by customer ID
+     * </pre>
+ */ + public void getOrders(sample.rpc.GetOrdersRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getGetOrdersMethod(), responseObserver); + } + + @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getPlaceOrderMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.PlaceOrderRequest, + sample.rpc.PlaceOrderResponse>( + this, METHODID_PLACE_ORDER))) + .addMethod( + getGetOrderMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.GetOrderRequest, + sample.rpc.GetOrderResponse>( + this, METHODID_GET_ORDER))) + .addMethod( + getGetOrdersMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.GetOrdersRequest, + sample.rpc.GetOrdersResponse>( + this, METHODID_GET_ORDERS))) + .build(); + } + } + + /** + *
+   * for Order Service
+   * 
+   */
+  public static final class OrderServiceStub
+      extends io.grpc.stub.AbstractAsyncStub<OrderServiceStub> {
+    private OrderServiceStub(
+        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
+      super(channel, callOptions);
+    }
+
+    @java.lang.Override
+    protected OrderServiceStub build(
+        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
+      return new OrderServiceStub(channel, callOptions);
+    }
+
+    /**
+     * <pre>
+     * Place an order. It's a transaction that spans OrderService and CustomerService
+     * </pre>
+     */
+    public void placeOrder(sample.rpc.PlaceOrderRequest request,
+        io.grpc.stub.StreamObserver<sample.rpc.PlaceOrderResponse> responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getPlaceOrderMethod(), getCallOptions()), request, responseObserver);
+    }
+
+    /**
+     * <pre>
+     * Get Order information by order ID
+     * </pre>
+     */
+    public void getOrder(sample.rpc.GetOrderRequest request,
+        io.grpc.stub.StreamObserver<sample.rpc.GetOrderResponse> responseObserver) {
+      io.grpc.stub.ClientCalls.asyncUnaryCall(
+          getChannel().newCall(getGetOrderMethod(), getCallOptions()), request, responseObserver);
+    }
+
+    /**
+     * <pre>
+     * Get Order information by customer ID
+     * </pre>
+ */ + public void getOrders(sample.rpc.GetOrdersRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetOrdersMethod(), getCallOptions()), request, responseObserver); + } + } + + /** + *
+   * for Order Service
+   * 
+ */ + public static final class OrderServiceBlockingStub extends io.grpc.stub.AbstractBlockingStub { + private OrderServiceBlockingStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected OrderServiceBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new OrderServiceBlockingStub(channel, callOptions); + } + + /** + *
+     * Place an order. It's a transaction that spans OrderService and CustomerService
+     * 
+ */ + public sample.rpc.PlaceOrderResponse placeOrder(sample.rpc.PlaceOrderRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getPlaceOrderMethod(), getCallOptions(), request); + } + + /** + *
+     * Get Order information by order ID
+     * 
+ */ + public sample.rpc.GetOrderResponse getOrder(sample.rpc.GetOrderRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetOrderMethod(), getCallOptions(), request); + } + + /** + *
+     * Get Order information by customer ID
+     * 
+ */ + public sample.rpc.GetOrdersResponse getOrders(sample.rpc.GetOrdersRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetOrdersMethod(), getCallOptions(), request); + } + } + + /** + *
+   * for Order Service
+   * 
+ */ + public static final class OrderServiceFutureStub extends io.grpc.stub.AbstractFutureStub { + private OrderServiceFutureStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected OrderServiceFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new OrderServiceFutureStub(channel, callOptions); + } + + /** + *
+     * Place an order. It's a transaction that spans OrderService and CustomerService
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture placeOrder( + sample.rpc.PlaceOrderRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getPlaceOrderMethod(), getCallOptions()), request); + } + + /** + *
+     * Get Order information by order ID
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture getOrder( + sample.rpc.GetOrderRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetOrderMethod(), getCallOptions()), request); + } + + /** + *
+     * Get Order information by customer ID
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture getOrders( + sample.rpc.GetOrdersRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetOrdersMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_PLACE_ORDER = 0; + private static final int METHODID_GET_ORDER = 1; + private static final int METHODID_GET_ORDERS = 2; + + private static final class MethodHandlers implements + io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final OrderServiceImplBase serviceImpl; + private final int methodId; + + MethodHandlers(OrderServiceImplBase serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_PLACE_ORDER: + serviceImpl.placeOrder((sample.rpc.PlaceOrderRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_GET_ORDER: + serviceImpl.getOrder((sample.rpc.GetOrderRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_GET_ORDERS: + serviceImpl.getOrders((sample.rpc.GetOrdersRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + } + + private static abstract class OrderServiceBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, io.grpc.protobuf.ProtoServiceDescriptorSupplier { + OrderServiceBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return sample.rpc.Sample.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("OrderService"); + } + } + + private static final class OrderServiceFileDescriptorSupplier + extends OrderServiceBaseDescriptorSupplier { + OrderServiceFileDescriptorSupplier() {} + } + + private static final class OrderServiceMethodDescriptorSupplier + extends OrderServiceBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final String methodName; + + OrderServiceMethodDescriptorSupplier(String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (OrderServiceGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new OrderServiceFileDescriptorSupplier()) + .addMethod(getPlaceOrderMethod()) + .addMethod(getGetOrderMethod()) + .addMethod(getGetOrdersMethod()) + .build(); 
+ } + } + } + return result; + } +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/CommitRequest.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/CommitRequest.java new file mode 100644 index 00000000..3fc815f0 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/CommitRequest.java @@ -0,0 +1,509 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.CommitRequest} + */ +public final class CommitRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.CommitRequest) + CommitRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use CommitRequest.newBuilder() to construct. + private CommitRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private CommitRequest() { + transactionId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new CommitRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_CommitRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_CommitRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.CommitRequest.class, sample.rpc.CommitRequest.Builder.class); + } + + public static final int TRANSACTION_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + @java.lang.Override + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + @java.lang.Override + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, transactionId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, transactionId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.CommitRequest)) { + return super.equals(obj); + } + sample.rpc.CommitRequest other = (sample.rpc.CommitRequest) obj; + + if (!getTransactionId() + .equals(other.getTransactionId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getTransactionId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.CommitRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.CommitRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.CommitRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.CommitRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.CommitRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.CommitRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
sample.rpc.CommitRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.CommitRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.CommitRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.CommitRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.CommitRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.CommitRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.CommitRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.CommitRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.CommitRequest) + sample.rpc.CommitRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_CommitRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_CommitRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.CommitRequest.class, sample.rpc.CommitRequest.Builder.class); + } + + // Construct using sample.rpc.CommitRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + transactionId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_CommitRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.CommitRequest getDefaultInstanceForType() { + return sample.rpc.CommitRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.CommitRequest build() { + sample.rpc.CommitRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.CommitRequest buildPartial() { + sample.rpc.CommitRequest result = new sample.rpc.CommitRequest(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.CommitRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.transactionId_ = transactionId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.CommitRequest) { + return mergeFrom((sample.rpc.CommitRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.CommitRequest other) { + if (other == sample.rpc.CommitRequest.getDefaultInstance()) return this; + if (!other.getTransactionId().isEmpty()) { + transactionId_ = other.transactionId_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + transactionId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: { + if 
(!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. + */ + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string transaction_id = 1; + * @param value The transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @return This builder for chaining. + */ + public Builder clearTransactionId() { + transactionId_ = getDefaultInstance().getTransactionId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @param value The bytes for transactionId to set. + * @return This builder for chaining. 
+ */ + public Builder setTransactionIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.CommitRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.CommitRequest) + private static final sample.rpc.CommitRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.CommitRequest(); + } + + public static sample.rpc.CommitRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CommitRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.CommitRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/CommitRequestOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/CommitRequestOrBuilder.java new file mode 100644 index 00000000..cef5ed22 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/CommitRequestOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface CommitRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.CommitRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * string transaction_id = 1; + * @return The transactionId. + */ + java.lang.String getTransactionId(); + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + com.google.protobuf.ByteString + getTransactionIdBytes(); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoRequest.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoRequest.java new file mode 100644 index 00000000..bdfb3bed --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoRequest.java @@ -0,0 +1,599 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.GetCustomerInfoRequest} + */ +public final class GetCustomerInfoRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.GetCustomerInfoRequest) + GetCustomerInfoRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use GetCustomerInfoRequest.newBuilder() to construct. + private GetCustomerInfoRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GetCustomerInfoRequest() { + transactionId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new GetCustomerInfoRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetCustomerInfoRequest.class, sample.rpc.GetCustomerInfoRequest.Builder.class); + } + + private int bitField0_; + public static final int TRANSACTION_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object transactionId_ = ""; + /** + * optional string transaction_id = 1; + * @return Whether the transactionId field is set. + */ + @java.lang.Override + public boolean hasTransactionId() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * optional string transaction_id = 1; + * @return The transactionId. + */ + @java.lang.Override + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } + } + /** + * optional string transaction_id = 1; + * @return The bytes for transactionId. + */ + @java.lang.Override + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CUSTOMER_ID_FIELD_NUMBER = 2; + private int customerId_ = 0; + /** + * int32 customer_id = 2; + * @return The customerId. 
+ */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, transactionId_); + } + if (customerId_ != 0) { + output.writeInt32(2, customerId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, transactionId_); + } + if (customerId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, customerId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.GetCustomerInfoRequest)) { + return super.equals(obj); + } + sample.rpc.GetCustomerInfoRequest other = (sample.rpc.GetCustomerInfoRequest) obj; + + if (hasTransactionId() != other.hasTransactionId()) return false; + if (hasTransactionId()) { + if (!getTransactionId() + .equals(other.getTransactionId())) return false; + } + if (getCustomerId() + != other.getCustomerId()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasTransactionId()) { + hash = (37 * hash) + TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getTransactionId().hashCode(); + } + hash = (37 * hash) + CUSTOMER_ID_FIELD_NUMBER; + hash = (53 * hash) + getCustomerId(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.GetCustomerInfoRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom( + byte[] data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.GetCustomerInfoRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.GetCustomerInfoRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.GetCustomerInfoRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.GetCustomerInfoRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.GetCustomerInfoRequest) + sample.rpc.GetCustomerInfoRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetCustomerInfoRequest.class, sample.rpc.GetCustomerInfoRequest.Builder.class); + } + + // Construct using sample.rpc.GetCustomerInfoRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + transactionId_ = ""; + customerId_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.GetCustomerInfoRequest getDefaultInstanceForType() { + return sample.rpc.GetCustomerInfoRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.GetCustomerInfoRequest build() { + sample.rpc.GetCustomerInfoRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.GetCustomerInfoRequest buildPartial() { + sample.rpc.GetCustomerInfoRequest result = new sample.rpc.GetCustomerInfoRequest(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.GetCustomerInfoRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.transactionId_ = transactionId_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.customerId_ = customerId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.GetCustomerInfoRequest) { + return mergeFrom((sample.rpc.GetCustomerInfoRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.GetCustomerInfoRequest other) { + if (other == sample.rpc.GetCustomerInfoRequest.getDefaultInstance()) return this; + if (other.hasTransactionId()) { + transactionId_ = other.transactionId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getCustomerId() != 0) { + setCustomerId(other.getCustomerId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, 
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + transactionId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: { + customerId_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object transactionId_ = ""; + /** + * optional string transaction_id = 1; + * @return Whether the transactionId field is set. + */ + public boolean hasTransactionId() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * optional string transaction_id = 1; + * @return The transactionId. + */ + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string transaction_id = 1; + * @return The bytes for transactionId. + */ + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string transaction_id = 1; + * @param value The transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * optional string transaction_id = 1; + * @return This builder for chaining. + */ + public Builder clearTransactionId() { + transactionId_ = getDefaultInstance().getTransactionId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * optional string transaction_id = 1; + * @param value The bytes for transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int customerId_ ; + /** + * int32 customer_id = 2; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + /** + * int32 customer_id = 2; + * @param value The customerId to set. + * @return This builder for chaining. + */ + public Builder setCustomerId(int value) { + + customerId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * int32 customer_id = 2; + * @return This builder for chaining. 
+ */ + public Builder clearCustomerId() { + bitField0_ = (bitField0_ & ~0x00000002); + customerId_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.GetCustomerInfoRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.GetCustomerInfoRequest) + private static final sample.rpc.GetCustomerInfoRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.GetCustomerInfoRequest(); + } + + public static sample.rpc.GetCustomerInfoRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetCustomerInfoRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.GetCustomerInfoRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoRequestOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoRequestOrBuilder.java new file mode 100644 index 00000000..fa5a8165 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoRequestOrBuilder.java @@ -0,0 +1,32 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface GetCustomerInfoRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.GetCustomerInfoRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * optional string transaction_id = 1; + * @return Whether the transactionId field is set. + */ + boolean hasTransactionId(); + /** + * optional string transaction_id = 1; + * @return The transactionId. + */ + java.lang.String getTransactionId(); + /** + * optional string transaction_id = 1; + * @return The bytes for transactionId. + */ + com.google.protobuf.ByteString + getTransactionIdBytes(); + + /** + * int32 customer_id = 2; + * @return The customerId. 
+ */ + int getCustomerId(); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoResponse.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoResponse.java new file mode 100644 index 00000000..c89a0a68 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoResponse.java @@ -0,0 +1,707 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.GetCustomerInfoResponse} + */ +public final class GetCustomerInfoResponse extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.GetCustomerInfoResponse) + GetCustomerInfoResponseOrBuilder { +private static final long serialVersionUID = 0L; + // Use GetCustomerInfoResponse.newBuilder() to construct. + private GetCustomerInfoResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GetCustomerInfoResponse() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new GetCustomerInfoResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetCustomerInfoResponse.class, sample.rpc.GetCustomerInfoResponse.Builder.class); + } + + public static final int ID_FIELD_NUMBER = 1; + private int id_ = 0; + /** + * int32 id = 1; + * @return The id. + */ + @java.lang.Override + public int getId() { + return id_; + } + + public static final int NAME_FIELD_NUMBER = 2; + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + /** + * string name = 2; + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * string name = 2; + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CREDIT_LIMIT_FIELD_NUMBER = 3; + private int creditLimit_ = 0; + /** + * int32 credit_limit = 3; + * @return The creditLimit. + */ + @java.lang.Override + public int getCreditLimit() { + return creditLimit_; + } + + public static final int CREDIT_TOTAL_FIELD_NUMBER = 4; + private int creditTotal_ = 0; + /** + * int32 credit_total = 4; + * @return The creditTotal. 
+ */ + @java.lang.Override + public int getCreditTotal() { + return creditTotal_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (id_ != 0) { + output.writeInt32(1, id_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, name_); + } + if (creditLimit_ != 0) { + output.writeInt32(3, creditLimit_); + } + if (creditTotal_ != 0) { + output.writeInt32(4, creditTotal_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (id_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, id_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, name_); + } + if (creditLimit_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(3, creditLimit_); + } + if (creditTotal_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(4, creditTotal_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.GetCustomerInfoResponse)) { + return super.equals(obj); + } + sample.rpc.GetCustomerInfoResponse other = (sample.rpc.GetCustomerInfoResponse) obj; + + if (getId() + != other.getId()) return false; + if (!getName() + .equals(other.getName())) return false; + if (getCreditLimit() + != other.getCreditLimit()) return false; + if (getCreditTotal() + != other.getCreditTotal()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ID_FIELD_NUMBER; + hash = (53 * hash) + getId(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + CREDIT_LIMIT_FIELD_NUMBER; + hash = (53 * hash) + getCreditLimit(); + hash = (37 * hash) + CREDIT_TOTAL_FIELD_NUMBER; + hash = (53 * hash) + getCreditTotal(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.GetCustomerInfoResponse parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom( 
+ com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.GetCustomerInfoResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.GetCustomerInfoResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.GetCustomerInfoResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.GetCustomerInfoResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.GetCustomerInfoResponse) + sample.rpc.GetCustomerInfoResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetCustomerInfoResponse.class, sample.rpc.GetCustomerInfoResponse.Builder.class); + } + + // Construct using sample.rpc.GetCustomerInfoResponse.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + id_ = 0; + name_ = ""; + creditLimit_ = 0; + creditTotal_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoResponse_descriptor; + } + + @java.lang.Override + public sample.rpc.GetCustomerInfoResponse getDefaultInstanceForType() { + return sample.rpc.GetCustomerInfoResponse.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.GetCustomerInfoResponse build() { + sample.rpc.GetCustomerInfoResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.GetCustomerInfoResponse buildPartial() { + sample.rpc.GetCustomerInfoResponse result = new sample.rpc.GetCustomerInfoResponse(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.GetCustomerInfoResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.id_ = id_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.creditLimit_ = creditLimit_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.creditTotal_ = creditTotal_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.GetCustomerInfoResponse) { + return mergeFrom((sample.rpc.GetCustomerInfoResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.GetCustomerInfoResponse other) { + if (other == sample.rpc.GetCustomerInfoResponse.getDefaultInstance()) return this; + if (other.getId() != 0) { + setId(other.getId()); + } + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getCreditLimit() != 0) { + setCreditLimit(other.getCreditLimit()); + } + if (other.getCreditTotal() != 0) { + setCreditTotal(other.getCreditTotal()); + } + 
this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + id_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: { + creditLimit_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: { + creditTotal_ = input.readInt32(); + bitField0_ |= 0x00000008; + break; + } // case 32 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private int id_ ; + /** + * int32 id = 1; + * @return The id. + */ + @java.lang.Override + public int getId() { + return id_; + } + /** + * int32 id = 1; + * @param value The id to set. + * @return This builder for chaining. + */ + public Builder setId(int value) { + + id_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * int32 id = 1; + * @return This builder for chaining. + */ + public Builder clearId() { + bitField0_ = (bitField0_ & ~0x00000001); + id_ = 0; + onChanged(); + return this; + } + + private java.lang.Object name_ = ""; + /** + * string name = 2; + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string name = 2; + * @return The bytes for name. + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string name = 2; + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + name_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * string name = 2; + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * string name = 2; + * @param value The bytes for name to set. + * @return This builder for chaining. 
+ */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private int creditLimit_ ; + /** + * int32 credit_limit = 3; + * @return The creditLimit. + */ + @java.lang.Override + public int getCreditLimit() { + return creditLimit_; + } + /** + * int32 credit_limit = 3; + * @param value The creditLimit to set. + * @return This builder for chaining. + */ + public Builder setCreditLimit(int value) { + + creditLimit_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * int32 credit_limit = 3; + * @return This builder for chaining. + */ + public Builder clearCreditLimit() { + bitField0_ = (bitField0_ & ~0x00000004); + creditLimit_ = 0; + onChanged(); + return this; + } + + private int creditTotal_ ; + /** + * int32 credit_total = 4; + * @return The creditTotal. + */ + @java.lang.Override + public int getCreditTotal() { + return creditTotal_; + } + /** + * int32 credit_total = 4; + * @param value The creditTotal to set. + * @return This builder for chaining. + */ + public Builder setCreditTotal(int value) { + + creditTotal_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * int32 credit_total = 4; + * @return This builder for chaining. + */ + public Builder clearCreditTotal() { + bitField0_ = (bitField0_ & ~0x00000008); + creditTotal_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.GetCustomerInfoResponse) + } + + // @@protoc_insertion_point(class_scope:rpc.GetCustomerInfoResponse) + private static final sample.rpc.GetCustomerInfoResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.GetCustomerInfoResponse(); + } + + public static sample.rpc.GetCustomerInfoResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetCustomerInfoResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.GetCustomerInfoResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git 
a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoResponseOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoResponseOrBuilder.java new file mode 100644 index 00000000..48ddb5a0 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoResponseOrBuilder.java @@ -0,0 +1,39 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface GetCustomerInfoResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.GetCustomerInfoResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * int32 id = 1; + * @return The id. + */ + int getId(); + + /** + * string name = 2; + * @return The name. + */ + java.lang.String getName(); + /** + * string name = 2; + * @return The bytes for name. + */ + com.google.protobuf.ByteString + getNameBytes(); + + /** + * int32 credit_limit = 3; + * @return The creditLimit. + */ + int getCreditLimit(); + + /** + * int32 credit_total = 4; + * @return The creditTotal. + */ + int getCreditTotal(); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderRequest.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderRequest.java new file mode 100644 index 00000000..f9bae4f4 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderRequest.java @@ -0,0 +1,509 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.GetOrderRequest} + */ +public final class GetOrderRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.GetOrderRequest) + GetOrderRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use GetOrderRequest.newBuilder() to construct. + private GetOrderRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GetOrderRequest() { + orderId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new GetOrderRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetOrderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetOrderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetOrderRequest.class, sample.rpc.GetOrderRequest.Builder.class); + } + + public static final int ORDER_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object orderId_ = ""; + /** + * string order_id = 1; + * @return The orderId. + */ + @java.lang.Override + public java.lang.String getOrderId() { + java.lang.Object ref = orderId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + orderId_ = s; + return s; + } + } + /** + * string order_id = 1; + * @return The bytes for orderId. 
+ */ + @java.lang.Override + public com.google.protobuf.ByteString + getOrderIdBytes() { + java.lang.Object ref = orderId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + orderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(orderId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, orderId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(orderId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, orderId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.GetOrderRequest)) { + return super.equals(obj); + } + sample.rpc.GetOrderRequest other = (sample.rpc.GetOrderRequest) obj; + + if (!getOrderId() + .equals(other.getOrderId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ORDER_ID_FIELD_NUMBER; + hash = (53 * hash) + getOrderId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.GetOrderRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrderRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrderRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrderRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrderRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrderRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrderRequest 
parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetOrderRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.GetOrderRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.GetOrderRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.GetOrderRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetOrderRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.GetOrderRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.GetOrderRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.GetOrderRequest) + sample.rpc.GetOrderRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetOrderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetOrderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetOrderRequest.class, sample.rpc.GetOrderRequest.Builder.class); + } + + // Construct using sample.rpc.GetOrderRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + orderId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_GetOrderRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.GetOrderRequest getDefaultInstanceForType() { + return sample.rpc.GetOrderRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.GetOrderRequest build() { + sample.rpc.GetOrderRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.GetOrderRequest buildPartial() { + sample.rpc.GetOrderRequest result = new sample.rpc.GetOrderRequest(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.GetOrderRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.orderId_ = orderId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.GetOrderRequest) { + return mergeFrom((sample.rpc.GetOrderRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.GetOrderRequest other) { + if (other == sample.rpc.GetOrderRequest.getDefaultInstance()) return this; + if (!other.getOrderId().isEmpty()) { + orderId_ = other.orderId_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + orderId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: { + if 
(!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object orderId_ = ""; + /** + * string order_id = 1; + * @return The orderId. + */ + public java.lang.String getOrderId() { + java.lang.Object ref = orderId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + orderId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string order_id = 1; + * @return The bytes for orderId. + */ + public com.google.protobuf.ByteString + getOrderIdBytes() { + java.lang.Object ref = orderId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + orderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string order_id = 1; + * @param value The orderId to set. + * @return This builder for chaining. + */ + public Builder setOrderId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + orderId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * string order_id = 1; + * @return This builder for chaining. + */ + public Builder clearOrderId() { + orderId_ = getDefaultInstance().getOrderId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * string order_id = 1; + * @param value The bytes for orderId to set. + * @return This builder for chaining. 
+ */ + public Builder setOrderIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + orderId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.GetOrderRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.GetOrderRequest) + private static final sample.rpc.GetOrderRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.GetOrderRequest(); + } + + public static sample.rpc.GetOrderRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetOrderRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.GetOrderRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderRequestOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderRequestOrBuilder.java new file mode 100644 index 00000000..dfe42efc --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderRequestOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface GetOrderRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.GetOrderRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * string order_id = 1; + * @return The orderId. + */ + java.lang.String getOrderId(); + /** + * string order_id = 1; + * @return The bytes for orderId. 
+ */ + com.google.protobuf.ByteString + getOrderIdBytes(); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderResponse.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderResponse.java new file mode 100644 index 00000000..c707fd38 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderResponse.java @@ -0,0 +1,554 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.GetOrderResponse} + */ +public final class GetOrderResponse extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.GetOrderResponse) + GetOrderResponseOrBuilder { +private static final long serialVersionUID = 0L; + // Use GetOrderResponse.newBuilder() to construct. + private GetOrderResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GetOrderResponse() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new GetOrderResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetOrderResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetOrderResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetOrderResponse.class, sample.rpc.GetOrderResponse.Builder.class); + } + + public static final int ORDER_FIELD_NUMBER = 1; + private sample.rpc.Order order_; + /** + * .rpc.Order order = 1; + * @return Whether the order field is set. + */ + @java.lang.Override + public boolean hasOrder() { + return order_ != null; + } + /** + * .rpc.Order order = 1; + * @return The order. + */ + @java.lang.Override + public sample.rpc.Order getOrder() { + return order_ == null ? sample.rpc.Order.getDefaultInstance() : order_; + } + /** + * .rpc.Order order = 1; + */ + @java.lang.Override + public sample.rpc.OrderOrBuilder getOrderOrBuilder() { + return order_ == null ? 
sample.rpc.Order.getDefaultInstance() : order_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (order_ != null) { + output.writeMessage(1, getOrder()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (order_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getOrder()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.GetOrderResponse)) { + return super.equals(obj); + } + sample.rpc.GetOrderResponse other = (sample.rpc.GetOrderResponse) obj; + + if (hasOrder() != other.hasOrder()) return false; + if (hasOrder()) { + if (!getOrder() + .equals(other.getOrder())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasOrder()) { + hash = (37 * hash) + ORDER_FIELD_NUMBER; + hash = (53 * hash) + getOrder().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.GetOrderResponse parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrderResponse parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrderResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrderResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrderResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrderResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrderResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetOrderResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.GetOrderResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.GetOrderResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.GetOrderResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetOrderResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.GetOrderResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.GetOrderResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.GetOrderResponse) + sample.rpc.GetOrderResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetOrderResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetOrderResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetOrderResponse.class, sample.rpc.GetOrderResponse.Builder.class); + } + + // Construct using sample.rpc.GetOrderResponse.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + order_ = null; + if (orderBuilder_ != null) { + orderBuilder_.dispose(); + orderBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_GetOrderResponse_descriptor; + } + + @java.lang.Override + public sample.rpc.GetOrderResponse getDefaultInstanceForType() { + return sample.rpc.GetOrderResponse.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.GetOrderResponse build() { + sample.rpc.GetOrderResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + 
} + + @java.lang.Override + public sample.rpc.GetOrderResponse buildPartial() { + sample.rpc.GetOrderResponse result = new sample.rpc.GetOrderResponse(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.GetOrderResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.order_ = orderBuilder_ == null + ? order_ + : orderBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.GetOrderResponse) { + return mergeFrom((sample.rpc.GetOrderResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.GetOrderResponse other) { + if (other == sample.rpc.GetOrderResponse.getDefaultInstance()) return this; + if (other.hasOrder()) { + mergeOrder(other.getOrder()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + input.readMessage( + getOrderFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private sample.rpc.Order order_; + private com.google.protobuf.SingleFieldBuilderV3< + sample.rpc.Order, sample.rpc.Order.Builder, sample.rpc.OrderOrBuilder> orderBuilder_; + /** + * .rpc.Order order = 1; + * @return Whether the order field is set. + */ + public boolean hasOrder() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * .rpc.Order order = 1; + * @return The order. + */ + public sample.rpc.Order getOrder() { + if (orderBuilder_ == null) { + return order_ == null ? 
sample.rpc.Order.getDefaultInstance() : order_; + } else { + return orderBuilder_.getMessage(); + } + } + /** + * .rpc.Order order = 1; + */ + public Builder setOrder(sample.rpc.Order value) { + if (orderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + order_ = value; + } else { + orderBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * .rpc.Order order = 1; + */ + public Builder setOrder( + sample.rpc.Order.Builder builderForValue) { + if (orderBuilder_ == null) { + order_ = builderForValue.build(); + } else { + orderBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * .rpc.Order order = 1; + */ + public Builder mergeOrder(sample.rpc.Order value) { + if (orderBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) && + order_ != null && + order_ != sample.rpc.Order.getDefaultInstance()) { + getOrderBuilder().mergeFrom(value); + } else { + order_ = value; + } + } else { + orderBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * .rpc.Order order = 1; + */ + public Builder clearOrder() { + bitField0_ = (bitField0_ & ~0x00000001); + order_ = null; + if (orderBuilder_ != null) { + orderBuilder_.dispose(); + orderBuilder_ = null; + } + onChanged(); + return this; + } + /** + * .rpc.Order order = 1; + */ + public sample.rpc.Order.Builder getOrderBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getOrderFieldBuilder().getBuilder(); + } + /** + * .rpc.Order order = 1; + */ + public sample.rpc.OrderOrBuilder getOrderOrBuilder() { + if (orderBuilder_ != null) { + return orderBuilder_.getMessageOrBuilder(); + } else { + return order_ == null ? 
+ sample.rpc.Order.getDefaultInstance() : order_; + } + } + /** + * .rpc.Order order = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + sample.rpc.Order, sample.rpc.Order.Builder, sample.rpc.OrderOrBuilder> + getOrderFieldBuilder() { + if (orderBuilder_ == null) { + orderBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + sample.rpc.Order, sample.rpc.Order.Builder, sample.rpc.OrderOrBuilder>( + getOrder(), + getParentForChildren(), + isClean()); + order_ = null; + } + return orderBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.GetOrderResponse) + } + + // @@protoc_insertion_point(class_scope:rpc.GetOrderResponse) + private static final sample.rpc.GetOrderResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.GetOrderResponse(); + } + + public static sample.rpc.GetOrderResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetOrderResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.GetOrderResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderResponseOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderResponseOrBuilder.java new file mode 100644 index 00000000..a29de622 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderResponseOrBuilder.java @@ -0,0 +1,24 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface GetOrderResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.GetOrderResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * .rpc.Order order = 1; + * @return Whether the order field is set. + */ + boolean hasOrder(); + /** + * .rpc.Order order = 1; + * @return The order. 
+ */ + sample.rpc.Order getOrder(); + /** + * .rpc.Order order = 1; + */ + sample.rpc.OrderOrBuilder getOrderOrBuilder(); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersRequest.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersRequest.java new file mode 100644 index 00000000..dee46ffb --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersRequest.java @@ -0,0 +1,439 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.GetOrdersRequest} + */ +public final class GetOrdersRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.GetOrdersRequest) + GetOrdersRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use GetOrdersRequest.newBuilder() to construct. + private GetOrdersRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GetOrdersRequest() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new GetOrdersRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetOrdersRequest.class, sample.rpc.GetOrdersRequest.Builder.class); + } + + public static final int CUSTOMER_ID_FIELD_NUMBER = 1; + private int customerId_ = 0; + /** + * int32 customer_id = 1; + * @return The customerId. 
+ */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (customerId_ != 0) { + output.writeInt32(1, customerId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (customerId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, customerId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.GetOrdersRequest)) { + return super.equals(obj); + } + sample.rpc.GetOrdersRequest other = (sample.rpc.GetOrdersRequest) obj; + + if (getCustomerId() + != other.getCustomerId()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CUSTOMER_ID_FIELD_NUMBER; + hash = (53 * hash) + getCustomerId(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.GetOrdersRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrdersRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrdersRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrdersRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrdersRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrdersRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrdersRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetOrdersRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, 
extensionRegistry); + } + + public static sample.rpc.GetOrdersRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.GetOrdersRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.GetOrdersRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetOrdersRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.GetOrdersRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.GetOrdersRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.GetOrdersRequest) + sample.rpc.GetOrdersRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetOrdersRequest.class, sample.rpc.GetOrdersRequest.Builder.class); + } + + // Construct using sample.rpc.GetOrdersRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + customerId_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.GetOrdersRequest getDefaultInstanceForType() { + return sample.rpc.GetOrdersRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.GetOrdersRequest build() { + sample.rpc.GetOrdersRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.GetOrdersRequest buildPartial() { + sample.rpc.GetOrdersRequest result = new sample.rpc.GetOrdersRequest(this); + 
if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.GetOrdersRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.customerId_ = customerId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.GetOrdersRequest) { + return mergeFrom((sample.rpc.GetOrdersRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.GetOrdersRequest other) { + if (other == sample.rpc.GetOrdersRequest.getDefaultInstance()) return this; + if (other.getCustomerId() != 0) { + setCustomerId(other.getCustomerId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + customerId_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private int customerId_ ; + /** + * int32 customer_id = 1; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + /** + * int32 customer_id = 1; + * @param value The customerId to set. + * @return This builder for chaining. + */ + public Builder setCustomerId(int value) { + + customerId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * int32 customer_id = 1; + * @return This builder for chaining. 
+ */ + public Builder clearCustomerId() { + bitField0_ = (bitField0_ & ~0x00000001); + customerId_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.GetOrdersRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.GetOrdersRequest) + private static final sample.rpc.GetOrdersRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.GetOrdersRequest(); + } + + public static sample.rpc.GetOrdersRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetOrdersRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.GetOrdersRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersRequestOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersRequestOrBuilder.java new file mode 100644 index 00000000..6a536778 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersRequestOrBuilder.java @@ -0,0 +1,15 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface GetOrdersRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.GetOrdersRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * int32 customer_id = 1; + * @return The customerId. + */ + int getCustomerId(); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersResponse.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersResponse.java new file mode 100644 index 00000000..e4b1b286 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersResponse.java @@ -0,0 +1,727 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.GetOrdersResponse} + */ +public final class GetOrdersResponse extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.GetOrdersResponse) + GetOrdersResponseOrBuilder { +private static final long serialVersionUID = 0L; + // Use GetOrdersResponse.newBuilder() to construct. + private GetOrdersResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GetOrdersResponse() { + order_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new GetOrdersResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetOrdersResponse.class, sample.rpc.GetOrdersResponse.Builder.class); + } + + public static final int ORDER_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private java.util.List order_; + /** + * repeated .rpc.Order order = 1; + */ + @java.lang.Override + public java.util.List getOrderList() { + return order_; + } + /** + * repeated .rpc.Order order = 1; + */ + @java.lang.Override + public java.util.List + getOrderOrBuilderList() { + return order_; + } + /** + * repeated .rpc.Order order = 1; + */ + @java.lang.Override + public int getOrderCount() { + return order_.size(); + } + /** + * repeated .rpc.Order order = 1; + */ + @java.lang.Override + public sample.rpc.Order getOrder(int index) { + return order_.get(index); + } + /** + * repeated .rpc.Order order = 1; + */ + @java.lang.Override + public sample.rpc.OrderOrBuilder getOrderOrBuilder( + int index) { + return order_.get(index); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < order_.size(); i++) { + output.writeMessage(1, order_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < order_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, order_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.GetOrdersResponse)) { + return super.equals(obj); + } + sample.rpc.GetOrdersResponse other = (sample.rpc.GetOrdersResponse) obj; + + if (!getOrderList() + .equals(other.getOrderList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return 
memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getOrderCount() > 0) { + hash = (37 * hash) + ORDER_FIELD_NUMBER; + hash = (53 * hash) + getOrderList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.GetOrdersResponse parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrdersResponse parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrdersResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrdersResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrdersResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrdersResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrdersResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetOrdersResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.GetOrdersResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.GetOrdersResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.GetOrdersResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetOrdersResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.GetOrdersResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + 
public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.GetOrdersResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.GetOrdersResponse) + sample.rpc.GetOrdersResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetOrdersResponse.class, sample.rpc.GetOrdersResponse.Builder.class); + } + + // Construct using sample.rpc.GetOrdersResponse.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (orderBuilder_ == null) { + order_ = java.util.Collections.emptyList(); + } else { + order_ = null; + orderBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersResponse_descriptor; + } + + @java.lang.Override + public sample.rpc.GetOrdersResponse getDefaultInstanceForType() { + return sample.rpc.GetOrdersResponse.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.GetOrdersResponse build() { + sample.rpc.GetOrdersResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.GetOrdersResponse buildPartial() { + sample.rpc.GetOrdersResponse result = new sample.rpc.GetOrdersResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(sample.rpc.GetOrdersResponse result) { + if (orderBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + order_ = java.util.Collections.unmodifiableList(order_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.order_ = order_; + } else { + result.order_ = orderBuilder_.build(); + } + } + + private void buildPartial0(sample.rpc.GetOrdersResponse result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.GetOrdersResponse) { + return mergeFrom((sample.rpc.GetOrdersResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.GetOrdersResponse other) { + if (other == sample.rpc.GetOrdersResponse.getDefaultInstance()) return this; + if (orderBuilder_ == null) { + if (!other.order_.isEmpty()) { + if (order_.isEmpty()) { + order_ = other.order_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureOrderIsMutable(); + order_.addAll(other.order_); + } + onChanged(); + } + } else { + 
if (!other.order_.isEmpty()) { + if (orderBuilder_.isEmpty()) { + orderBuilder_.dispose(); + orderBuilder_ = null; + order_ = other.order_; + bitField0_ = (bitField0_ & ~0x00000001); + orderBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getOrderFieldBuilder() : null; + } else { + orderBuilder_.addAllMessages(other.order_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + sample.rpc.Order m = + input.readMessage( + sample.rpc.Order.parser(), + extensionRegistry); + if (orderBuilder_ == null) { + ensureOrderIsMutable(); + order_.add(m); + } else { + orderBuilder_.addMessage(m); + } + break; + } // case 10 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.util.List order_ = + java.util.Collections.emptyList(); + private void ensureOrderIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + order_ = new java.util.ArrayList(order_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.Order, sample.rpc.Order.Builder, sample.rpc.OrderOrBuilder> orderBuilder_; + + /** + * repeated .rpc.Order order = 1; + */ + public java.util.List getOrderList() { + if (orderBuilder_ == null) { + return java.util.Collections.unmodifiableList(order_); + } else { + return orderBuilder_.getMessageList(); + } + } + /** + * repeated .rpc.Order order = 1; + */ + public int getOrderCount() { + if (orderBuilder_ == null) { + return order_.size(); + } else { + return orderBuilder_.getCount(); + } + } + /** + * repeated .rpc.Order order = 1; + */ + public sample.rpc.Order getOrder(int index) { + if (orderBuilder_ == null) { + return order_.get(index); + } else { + return orderBuilder_.getMessage(index); + } + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder setOrder( + int index, sample.rpc.Order value) { + if (orderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOrderIsMutable(); + order_.set(index, value); + onChanged(); + } else { + orderBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder setOrder( + int index, sample.rpc.Order.Builder builderForValue) { + if (orderBuilder_ == null) { + ensureOrderIsMutable(); + order_.set(index, builderForValue.build()); + onChanged(); + } else { + orderBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder addOrder(sample.rpc.Order value) { + if (orderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOrderIsMutable(); + order_.add(value); + onChanged(); + } else { + 
orderBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder addOrder( + int index, sample.rpc.Order value) { + if (orderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOrderIsMutable(); + order_.add(index, value); + onChanged(); + } else { + orderBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder addOrder( + sample.rpc.Order.Builder builderForValue) { + if (orderBuilder_ == null) { + ensureOrderIsMutable(); + order_.add(builderForValue.build()); + onChanged(); + } else { + orderBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder addOrder( + int index, sample.rpc.Order.Builder builderForValue) { + if (orderBuilder_ == null) { + ensureOrderIsMutable(); + order_.add(index, builderForValue.build()); + onChanged(); + } else { + orderBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder addAllOrder( + java.lang.Iterable values) { + if (orderBuilder_ == null) { + ensureOrderIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, order_); + onChanged(); + } else { + orderBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder clearOrder() { + if (orderBuilder_ == null) { + order_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + orderBuilder_.clear(); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder removeOrder(int index) { + if (orderBuilder_ == null) { + ensureOrderIsMutable(); + order_.remove(index); + onChanged(); + } else { + orderBuilder_.remove(index); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public sample.rpc.Order.Builder getOrderBuilder( + int index) { + return getOrderFieldBuilder().getBuilder(index); + } + /** + * repeated .rpc.Order order = 1; + */ + public sample.rpc.OrderOrBuilder getOrderOrBuilder( + int index) { + if (orderBuilder_ == null) { + return order_.get(index); } else { + return orderBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .rpc.Order order = 1; + */ + public java.util.List + getOrderOrBuilderList() { + if (orderBuilder_ != null) { + return orderBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(order_); + } + } + /** + * repeated .rpc.Order order = 1; + */ + public sample.rpc.Order.Builder addOrderBuilder() { + return getOrderFieldBuilder().addBuilder( + sample.rpc.Order.getDefaultInstance()); + } + /** + * repeated .rpc.Order order = 1; + */ + public sample.rpc.Order.Builder addOrderBuilder( + int index) { + return getOrderFieldBuilder().addBuilder( + index, sample.rpc.Order.getDefaultInstance()); + } + /** + * repeated .rpc.Order order = 1; + */ + public java.util.List + getOrderBuilderList() { + return getOrderFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.Order, sample.rpc.Order.Builder, sample.rpc.OrderOrBuilder> + getOrderFieldBuilder() { + if (orderBuilder_ == null) { + orderBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.Order, sample.rpc.Order.Builder, sample.rpc.OrderOrBuilder>( + order_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + order_ = 
null; + } + return orderBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.GetOrdersResponse) + } + + // @@protoc_insertion_point(class_scope:rpc.GetOrdersResponse) + private static final sample.rpc.GetOrdersResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.GetOrdersResponse(); + } + + public static sample.rpc.GetOrdersResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetOrdersResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.GetOrdersResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersResponseOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersResponseOrBuilder.java new file mode 100644 index 00000000..fff2fcfd --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersResponseOrBuilder.java @@ -0,0 +1,33 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: sample.proto + +package sample.rpc; + +public interface GetOrdersResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.GetOrdersResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * repeated .rpc.Order order = 1; + */ + java.util.List + getOrderList(); + /** + * repeated .rpc.Order order = 1; + */ + sample.rpc.Order getOrder(int index); + /** + * repeated .rpc.Order order = 1; + */ + int getOrderCount(); + /** + * repeated .rpc.Order order = 1; + */ + java.util.List + getOrderOrBuilderList(); + /** + * repeated .rpc.Order order = 1; + */ + sample.rpc.OrderOrBuilder getOrderOrBuilder( + int index); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/ItemOrder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/ItemOrder.java new file mode 100644 index 00000000..e23e2a4d --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/ItemOrder.java @@ -0,0 +1,505 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.ItemOrder} + */ +public final class ItemOrder extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.ItemOrder) + ItemOrderOrBuilder { +private static final long serialVersionUID = 0L; + // Use ItemOrder.newBuilder() to construct. + private ItemOrder(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ItemOrder() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new ItemOrder(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_ItemOrder_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_ItemOrder_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.ItemOrder.class, sample.rpc.ItemOrder.Builder.class); + } + + public static final int ITEM_ID_FIELD_NUMBER = 1; + private int itemId_ = 0; + /** + * int32 item_id = 1; + * @return The itemId. + */ + @java.lang.Override + public int getItemId() { + return itemId_; + } + + public static final int COUNT_FIELD_NUMBER = 2; + private int count_ = 0; + /** + * int32 count = 2; + * @return The count. 
+ */ + @java.lang.Override + public int getCount() { + return count_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (itemId_ != 0) { + output.writeInt32(1, itemId_); + } + if (count_ != 0) { + output.writeInt32(2, count_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (itemId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, itemId_); + } + if (count_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, count_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.ItemOrder)) { + return super.equals(obj); + } + sample.rpc.ItemOrder other = (sample.rpc.ItemOrder) obj; + + if (getItemId() + != other.getItemId()) return false; + if (getCount() + != other.getCount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ITEM_ID_FIELD_NUMBER; + hash = (53 * hash) + getItemId(); + hash = (37 * hash) + COUNT_FIELD_NUMBER; + hash = (53 * hash) + getCount(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.ItemOrder parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.ItemOrder parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.ItemOrder parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.ItemOrder parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.ItemOrder parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.ItemOrder parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.ItemOrder parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.ItemOrder parseFrom( + java.io.InputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.ItemOrder parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.ItemOrder parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.ItemOrder parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.ItemOrder parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.ItemOrder prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.ItemOrder} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.ItemOrder) + sample.rpc.ItemOrderOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_ItemOrder_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_ItemOrder_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.ItemOrder.class, sample.rpc.ItemOrder.Builder.class); + } + + // Construct using sample.rpc.ItemOrder.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + itemId_ = 0; + count_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_ItemOrder_descriptor; + } + + @java.lang.Override + public sample.rpc.ItemOrder getDefaultInstanceForType() { + return sample.rpc.ItemOrder.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.ItemOrder build() { + sample.rpc.ItemOrder result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.ItemOrder buildPartial() { + sample.rpc.ItemOrder 
result = new sample.rpc.ItemOrder(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.ItemOrder result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.itemId_ = itemId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.count_ = count_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.ItemOrder) { + return mergeFrom((sample.rpc.ItemOrder)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.ItemOrder other) { + if (other == sample.rpc.ItemOrder.getDefaultInstance()) return this; + if (other.getItemId() != 0) { + setItemId(other.getItemId()); + } + if (other.getCount() != 0) { + setCount(other.getCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + itemId_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 16: { + count_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private int itemId_ ; + /** + * int32 item_id = 1; + * @return The itemId. + */ + @java.lang.Override + public int getItemId() { + return itemId_; + } + /** + * int32 item_id = 1; + * @param value The itemId to set. + * @return This builder for chaining. + */ + public Builder setItemId(int value) { + + itemId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * int32 item_id = 1; + * @return This builder for chaining. + */ + public Builder clearItemId() { + bitField0_ = (bitField0_ & ~0x00000001); + itemId_ = 0; + onChanged(); + return this; + } + + private int count_ ; + /** + * int32 count = 2; + * @return The count. + */ + @java.lang.Override + public int getCount() { + return count_; + } + /** + * int32 count = 2; + * @param value The count to set. + * @return This builder for chaining. + */ + public Builder setCount(int value) { + + count_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * int32 count = 2; + * @return This builder for chaining. 
+ */ + public Builder clearCount() { + bitField0_ = (bitField0_ & ~0x00000002); + count_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.ItemOrder) + } + + // @@protoc_insertion_point(class_scope:rpc.ItemOrder) + private static final sample.rpc.ItemOrder DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.ItemOrder(); + } + + public static sample.rpc.ItemOrder getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ItemOrder parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.ItemOrder getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/ItemOrderOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/ItemOrderOrBuilder.java new file mode 100644 index 00000000..412c5349 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/ItemOrderOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface ItemOrderOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.ItemOrder) + com.google.protobuf.MessageOrBuilder { + + /** + * int32 item_id = 1; + * @return The itemId. + */ + int getItemId(); + + /** + * int32 count = 2; + * @return The count. + */ + int getCount(); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/Order.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/Order.java new file mode 100644 index 00000000..eee1c7e8 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/Order.java @@ -0,0 +1,1198 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.Order} + */ +public final class Order extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.Order) + OrderOrBuilder { +private static final long serialVersionUID = 0L; + // Use Order.newBuilder() to construct. + private Order(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private Order() { + orderId_ = ""; + customerName_ = ""; + statement_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new Order(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_Order_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_Order_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.Order.class, sample.rpc.Order.Builder.class); + } + + public static final int ORDER_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object orderId_ = ""; + /** + * string order_id = 1; + * @return The orderId. + */ + @java.lang.Override + public java.lang.String getOrderId() { + java.lang.Object ref = orderId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + orderId_ = s; + return s; + } + } + /** + * string order_id = 1; + * @return The bytes for orderId. + */ + @java.lang.Override + public com.google.protobuf.ByteString + getOrderIdBytes() { + java.lang.Object ref = orderId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + orderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TIMESTAMP_FIELD_NUMBER = 2; + private long timestamp_ = 0L; + /** + * int64 timestamp = 2; + * @return The timestamp. + */ + @java.lang.Override + public long getTimestamp() { + return timestamp_; + } + + public static final int CUSTOMER_ID_FIELD_NUMBER = 3; + private int customerId_ = 0; + /** + * int32 customer_id = 3; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + + public static final int CUSTOMER_NAME_FIELD_NUMBER = 4; + @SuppressWarnings("serial") + private volatile java.lang.Object customerName_ = ""; + /** + * string customer_name = 4; + * @return The customerName. + */ + @java.lang.Override + public java.lang.String getCustomerName() { + java.lang.Object ref = customerName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + customerName_ = s; + return s; + } + } + /** + * string customer_name = 4; + * @return The bytes for customerName. 
+ */ + @java.lang.Override + public com.google.protobuf.ByteString + getCustomerNameBytes() { + java.lang.Object ref = customerName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + customerName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STATEMENT_FIELD_NUMBER = 5; + @SuppressWarnings("serial") + private java.util.List statement_; + /** + * repeated .rpc.Statement statement = 5; + */ + @java.lang.Override + public java.util.List getStatementList() { + return statement_; + } + /** + * repeated .rpc.Statement statement = 5; + */ + @java.lang.Override + public java.util.List + getStatementOrBuilderList() { + return statement_; + } + /** + * repeated .rpc.Statement statement = 5; + */ + @java.lang.Override + public int getStatementCount() { + return statement_.size(); + } + /** + * repeated .rpc.Statement statement = 5; + */ + @java.lang.Override + public sample.rpc.Statement getStatement(int index) { + return statement_.get(index); + } + /** + * repeated .rpc.Statement statement = 5; + */ + @java.lang.Override + public sample.rpc.StatementOrBuilder getStatementOrBuilder( + int index) { + return statement_.get(index); + } + + public static final int TOTAL_FIELD_NUMBER = 6; + private int total_ = 0; + /** + * int32 total = 6; + * @return The total. + */ + @java.lang.Override + public int getTotal() { + return total_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(orderId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, orderId_); + } + if (timestamp_ != 0L) { + output.writeInt64(2, timestamp_); + } + if (customerId_ != 0) { + output.writeInt32(3, customerId_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(customerName_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, customerName_); + } + for (int i = 0; i < statement_.size(); i++) { + output.writeMessage(5, statement_.get(i)); + } + if (total_ != 0) { + output.writeInt32(6, total_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(orderId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, orderId_); + } + if (timestamp_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, timestamp_); + } + if (customerId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(3, customerId_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(customerName_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, customerName_); + } + for (int i = 0; i < statement_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, statement_.get(i)); + } + if (total_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(6, total_); + } + size += getUnknownFields().getSerializedSize(); + 
memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.Order)) { + return super.equals(obj); + } + sample.rpc.Order other = (sample.rpc.Order) obj; + + if (!getOrderId() + .equals(other.getOrderId())) return false; + if (getTimestamp() + != other.getTimestamp()) return false; + if (getCustomerId() + != other.getCustomerId()) return false; + if (!getCustomerName() + .equals(other.getCustomerName())) return false; + if (!getStatementList() + .equals(other.getStatementList())) return false; + if (getTotal() + != other.getTotal()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ORDER_ID_FIELD_NUMBER; + hash = (53 * hash) + getOrderId().hashCode(); + hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getTimestamp()); + hash = (37 * hash) + CUSTOMER_ID_FIELD_NUMBER; + hash = (53 * hash) + getCustomerId(); + hash = (37 * hash) + CUSTOMER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getCustomerName().hashCode(); + if (getStatementCount() > 0) { + hash = (37 * hash) + STATEMENT_FIELD_NUMBER; + hash = (53 * hash) + getStatementList().hashCode(); + } + hash = (37 * hash) + TOTAL_FIELD_NUMBER; + hash = (53 * hash) + getTotal(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.Order parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.Order parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.Order parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.Order parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.Order parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.Order parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.Order parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.Order parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.Order parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.Order parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.Order parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.Order parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.Order prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.Order} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.Order) + sample.rpc.OrderOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_Order_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_Order_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.Order.class, sample.rpc.Order.Builder.class); + } + + // Construct using sample.rpc.Order.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + orderId_ = ""; + timestamp_ = 0L; + customerId_ = 0; + customerName_ = ""; + if (statementBuilder_ == null) { + statement_ = java.util.Collections.emptyList(); + } else { + statement_ = null; + statementBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + total_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_Order_descriptor; + } + + @java.lang.Override + public sample.rpc.Order getDefaultInstanceForType() { + return sample.rpc.Order.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.Order build() { + sample.rpc.Order result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.Order buildPartial() { + sample.rpc.Order result = new sample.rpc.Order(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return 
result; + } + + private void buildPartialRepeatedFields(sample.rpc.Order result) { + if (statementBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0)) { + statement_ = java.util.Collections.unmodifiableList(statement_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.statement_ = statement_; + } else { + result.statement_ = statementBuilder_.build(); + } + } + + private void buildPartial0(sample.rpc.Order result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.orderId_ = orderId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.timestamp_ = timestamp_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.customerId_ = customerId_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.customerName_ = customerName_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.total_ = total_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.Order) { + return mergeFrom((sample.rpc.Order)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.Order other) { + if (other == sample.rpc.Order.getDefaultInstance()) return this; + if (!other.getOrderId().isEmpty()) { + orderId_ = other.orderId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getTimestamp() != 0L) { + setTimestamp(other.getTimestamp()); + } + if (other.getCustomerId() != 0) { + setCustomerId(other.getCustomerId()); + } + if (!other.getCustomerName().isEmpty()) { + customerName_ = other.customerName_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (statementBuilder_ == null) { + if (!other.statement_.isEmpty()) { + if (statement_.isEmpty()) { + statement_ = other.statement_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureStatementIsMutable(); + statement_.addAll(other.statement_); + } + onChanged(); + } + } else { + if (!other.statement_.isEmpty()) { + if (statementBuilder_.isEmpty()) { + statementBuilder_.dispose(); + statementBuilder_ = null; + statement_ = other.statement_; + bitField0_ = (bitField0_ & ~0x00000010); + statementBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
+ getStatementFieldBuilder() : null; + } else { + statementBuilder_.addAllMessages(other.statement_); + } + } + } + if (other.getTotal() != 0) { + setTotal(other.getTotal()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + orderId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: { + timestamp_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: { + customerId_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: { + customerName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: { + sample.rpc.Statement m = + input.readMessage( + sample.rpc.Statement.parser(), + extensionRegistry); + if (statementBuilder_ == null) { + ensureStatementIsMutable(); + statement_.add(m); + } else { + statementBuilder_.addMessage(m); + } + break; + } // case 42 + case 48: { + total_ = input.readInt32(); + bitField0_ |= 0x00000020; + break; + } // case 48 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object orderId_ = ""; + /** + * string order_id = 1; + * @return The orderId. + */ + public java.lang.String getOrderId() { + java.lang.Object ref = orderId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + orderId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string order_id = 1; + * @return The bytes for orderId. + */ + public com.google.protobuf.ByteString + getOrderIdBytes() { + java.lang.Object ref = orderId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + orderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string order_id = 1; + * @param value The orderId to set. + * @return This builder for chaining. + */ + public Builder setOrderId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + orderId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * string order_id = 1; + * @return This builder for chaining. + */ + public Builder clearOrderId() { + orderId_ = getDefaultInstance().getOrderId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * string order_id = 1; + * @param value The bytes for orderId to set. + * @return This builder for chaining. 
+ */ + public Builder setOrderIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + orderId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private long timestamp_ ; + /** + * int64 timestamp = 2; + * @return The timestamp. + */ + @java.lang.Override + public long getTimestamp() { + return timestamp_; + } + /** + * int64 timestamp = 2; + * @param value The timestamp to set. + * @return This builder for chaining. + */ + public Builder setTimestamp(long value) { + + timestamp_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * int64 timestamp = 2; + * @return This builder for chaining. + */ + public Builder clearTimestamp() { + bitField0_ = (bitField0_ & ~0x00000002); + timestamp_ = 0L; + onChanged(); + return this; + } + + private int customerId_ ; + /** + * int32 customer_id = 3; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + /** + * int32 customer_id = 3; + * @param value The customerId to set. + * @return This builder for chaining. + */ + public Builder setCustomerId(int value) { + + customerId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * int32 customer_id = 3; + * @return This builder for chaining. + */ + public Builder clearCustomerId() { + bitField0_ = (bitField0_ & ~0x00000004); + customerId_ = 0; + onChanged(); + return this; + } + + private java.lang.Object customerName_ = ""; + /** + * string customer_name = 4; + * @return The customerName. + */ + public java.lang.String getCustomerName() { + java.lang.Object ref = customerName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + customerName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string customer_name = 4; + * @return The bytes for customerName. + */ + public com.google.protobuf.ByteString + getCustomerNameBytes() { + java.lang.Object ref = customerName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + customerName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string customer_name = 4; + * @param value The customerName to set. + * @return This builder for chaining. + */ + public Builder setCustomerName( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + customerName_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * string customer_name = 4; + * @return This builder for chaining. + */ + public Builder clearCustomerName() { + customerName_ = getDefaultInstance().getCustomerName(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + /** + * string customer_name = 4; + * @param value The bytes for customerName to set. + * @return This builder for chaining. 
+ */ + public Builder setCustomerNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + customerName_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private java.util.List statement_ = + java.util.Collections.emptyList(); + private void ensureStatementIsMutable() { + if (!((bitField0_ & 0x00000010) != 0)) { + statement_ = new java.util.ArrayList(statement_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.Statement, sample.rpc.Statement.Builder, sample.rpc.StatementOrBuilder> statementBuilder_; + + /** + * repeated .rpc.Statement statement = 5; + */ + public java.util.List getStatementList() { + if (statementBuilder_ == null) { + return java.util.Collections.unmodifiableList(statement_); + } else { + return statementBuilder_.getMessageList(); + } + } + /** + * repeated .rpc.Statement statement = 5; + */ + public int getStatementCount() { + if (statementBuilder_ == null) { + return statement_.size(); + } else { + return statementBuilder_.getCount(); + } + } + /** + * repeated .rpc.Statement statement = 5; + */ + public sample.rpc.Statement getStatement(int index) { + if (statementBuilder_ == null) { + return statement_.get(index); + } else { + return statementBuilder_.getMessage(index); + } + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder setStatement( + int index, sample.rpc.Statement value) { + if (statementBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatementIsMutable(); + statement_.set(index, value); + onChanged(); + } else { + statementBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder setStatement( + int index, sample.rpc.Statement.Builder builderForValue) { + if (statementBuilder_ == null) { + ensureStatementIsMutable(); + statement_.set(index, builderForValue.build()); + onChanged(); + } else { + statementBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder addStatement(sample.rpc.Statement value) { + if (statementBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatementIsMutable(); + statement_.add(value); + onChanged(); + } else { + statementBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder addStatement( + int index, sample.rpc.Statement value) { + if (statementBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatementIsMutable(); + statement_.add(index, value); + onChanged(); + } else { + statementBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder addStatement( + sample.rpc.Statement.Builder builderForValue) { + if (statementBuilder_ == null) { + ensureStatementIsMutable(); + statement_.add(builderForValue.build()); + onChanged(); + } else { + statementBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder addStatement( + int index, sample.rpc.Statement.Builder builderForValue) { + if (statementBuilder_ == null) { + ensureStatementIsMutable(); + statement_.add(index, builderForValue.build()); + onChanged(); + } else { + 
statementBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder addAllStatement( + java.lang.Iterable values) { + if (statementBuilder_ == null) { + ensureStatementIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, statement_); + onChanged(); + } else { + statementBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder clearStatement() { + if (statementBuilder_ == null) { + statement_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + statementBuilder_.clear(); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder removeStatement(int index) { + if (statementBuilder_ == null) { + ensureStatementIsMutable(); + statement_.remove(index); + onChanged(); + } else { + statementBuilder_.remove(index); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public sample.rpc.Statement.Builder getStatementBuilder( + int index) { + return getStatementFieldBuilder().getBuilder(index); + } + /** + * repeated .rpc.Statement statement = 5; + */ + public sample.rpc.StatementOrBuilder getStatementOrBuilder( + int index) { + if (statementBuilder_ == null) { + return statement_.get(index); } else { + return statementBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .rpc.Statement statement = 5; + */ + public java.util.List + getStatementOrBuilderList() { + if (statementBuilder_ != null) { + return statementBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(statement_); + } + } + /** + * repeated .rpc.Statement statement = 5; + */ + public sample.rpc.Statement.Builder addStatementBuilder() { + return getStatementFieldBuilder().addBuilder( + sample.rpc.Statement.getDefaultInstance()); + } + /** + * repeated .rpc.Statement statement = 5; + */ + public sample.rpc.Statement.Builder addStatementBuilder( + int index) { + return getStatementFieldBuilder().addBuilder( + index, sample.rpc.Statement.getDefaultInstance()); + } + /** + * repeated .rpc.Statement statement = 5; + */ + public java.util.List + getStatementBuilderList() { + return getStatementFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.Statement, sample.rpc.Statement.Builder, sample.rpc.StatementOrBuilder> + getStatementFieldBuilder() { + if (statementBuilder_ == null) { + statementBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.Statement, sample.rpc.Statement.Builder, sample.rpc.StatementOrBuilder>( + statement_, + ((bitField0_ & 0x00000010) != 0), + getParentForChildren(), + isClean()); + statement_ = null; + } + return statementBuilder_; + } + + private int total_ ; + /** + * int32 total = 6; + * @return The total. + */ + @java.lang.Override + public int getTotal() { + return total_; + } + /** + * int32 total = 6; + * @param value The total to set. + * @return This builder for chaining. + */ + public Builder setTotal(int value) { + + total_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + /** + * int32 total = 6; + * @return This builder for chaining. 
+ */ + public Builder clearTotal() { + bitField0_ = (bitField0_ & ~0x00000020); + total_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.Order) + } + + // @@protoc_insertion_point(class_scope:rpc.Order) + private static final sample.rpc.Order DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.Order(); + } + + public static sample.rpc.Order getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Order parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.Order getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/OrderOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/OrderOrBuilder.java new file mode 100644 index 00000000..d2f9598e --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/OrderOrBuilder.java @@ -0,0 +1,75 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface OrderOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.Order) + com.google.protobuf.MessageOrBuilder { + + /** + * string order_id = 1; + * @return The orderId. + */ + java.lang.String getOrderId(); + /** + * string order_id = 1; + * @return The bytes for orderId. + */ + com.google.protobuf.ByteString + getOrderIdBytes(); + + /** + * int64 timestamp = 2; + * @return The timestamp. + */ + long getTimestamp(); + + /** + * int32 customer_id = 3; + * @return The customerId. + */ + int getCustomerId(); + + /** + * string customer_name = 4; + * @return The customerName. + */ + java.lang.String getCustomerName(); + /** + * string customer_name = 4; + * @return The bytes for customerName. 
+ */ + com.google.protobuf.ByteString + getCustomerNameBytes(); + + /** + * repeated .rpc.Statement statement = 5; + */ + java.util.List + getStatementList(); + /** + * repeated .rpc.Statement statement = 5; + */ + sample.rpc.Statement getStatement(int index); + /** + * repeated .rpc.Statement statement = 5; + */ + int getStatementCount(); + /** + * repeated .rpc.Statement statement = 5; + */ + java.util.List + getStatementOrBuilderList(); + /** + * repeated .rpc.Statement statement = 5; + */ + sample.rpc.StatementOrBuilder getStatementOrBuilder( + int index); + + /** + * int32 total = 6; + * @return The total. + */ + int getTotal(); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PaymentRequest.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PaymentRequest.java new file mode 100644 index 00000000..ccb770ba --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PaymentRequest.java @@ -0,0 +1,641 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.PaymentRequest} + */ +public final class PaymentRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.PaymentRequest) + PaymentRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use PaymentRequest.newBuilder() to construct. + private PaymentRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private PaymentRequest() { + transactionId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new PaymentRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_PaymentRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_PaymentRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.PaymentRequest.class, sample.rpc.PaymentRequest.Builder.class); + } + + public static final int TRANSACTION_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + @java.lang.Override + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. + */ + @java.lang.Override + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CUSTOMER_ID_FIELD_NUMBER = 2; + private int customerId_ = 0; + /** + * int32 customer_id = 2; + * @return The customerId. 
+ */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + + public static final int AMOUNT_FIELD_NUMBER = 3; + private int amount_ = 0; + /** + * int32 amount = 3; + * @return The amount. + */ + @java.lang.Override + public int getAmount() { + return amount_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, transactionId_); + } + if (customerId_ != 0) { + output.writeInt32(2, customerId_); + } + if (amount_ != 0) { + output.writeInt32(3, amount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, transactionId_); + } + if (customerId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, customerId_); + } + if (amount_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(3, amount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.PaymentRequest)) { + return super.equals(obj); + } + sample.rpc.PaymentRequest other = (sample.rpc.PaymentRequest) obj; + + if (!getTransactionId() + .equals(other.getTransactionId())) return false; + if (getCustomerId() + != other.getCustomerId()) return false; + if (getAmount() + != other.getAmount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getTransactionId().hashCode(); + hash = (37 * hash) + CUSTOMER_ID_FIELD_NUMBER; + hash = (53 * hash) + getCustomerId(); + hash = (37 * hash) + AMOUNT_FIELD_NUMBER; + hash = (53 * hash) + getAmount(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.PaymentRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PaymentRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PaymentRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PaymentRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
+ throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PaymentRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PaymentRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PaymentRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.PaymentRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.PaymentRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.PaymentRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.PaymentRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.PaymentRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.PaymentRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.PaymentRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.PaymentRequest) + sample.rpc.PaymentRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_PaymentRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_PaymentRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.PaymentRequest.class, sample.rpc.PaymentRequest.Builder.class); + } + + // Construct using sample.rpc.PaymentRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + transactionId_ = ""; + customerId_ = 0; + amount_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_PaymentRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.PaymentRequest getDefaultInstanceForType() { + return sample.rpc.PaymentRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.PaymentRequest build() { + sample.rpc.PaymentRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.PaymentRequest buildPartial() { + sample.rpc.PaymentRequest result = new sample.rpc.PaymentRequest(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.PaymentRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.transactionId_ = transactionId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.customerId_ = customerId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.amount_ = amount_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.PaymentRequest) { + return mergeFrom((sample.rpc.PaymentRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.PaymentRequest other) { + if (other == sample.rpc.PaymentRequest.getDefaultInstance()) return this; + if (!other.getTransactionId().isEmpty()) { + transactionId_ = other.transactionId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getCustomerId() != 0) { + setCustomerId(other.getCustomerId()); + } + if (other.getAmount() != 0) { + setAmount(other.getAmount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + transactionId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: { + customerId_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: { + amount_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. + */ + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string transaction_id = 1; + * @param value The transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @return This builder for chaining. + */ + public Builder clearTransactionId() { + transactionId_ = getDefaultInstance().getTransactionId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @param value The bytes for transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int customerId_ ; + /** + * int32 customer_id = 2; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + /** + * int32 customer_id = 2; + * @param value The customerId to set. + * @return This builder for chaining. + */ + public Builder setCustomerId(int value) { + + customerId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * int32 customer_id = 2; + * @return This builder for chaining. + */ + public Builder clearCustomerId() { + bitField0_ = (bitField0_ & ~0x00000002); + customerId_ = 0; + onChanged(); + return this; + } + + private int amount_ ; + /** + * int32 amount = 3; + * @return The amount. 
+ */ + @java.lang.Override + public int getAmount() { + return amount_; + } + /** + * int32 amount = 3; + * @param value The amount to set. + * @return This builder for chaining. + */ + public Builder setAmount(int value) { + + amount_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * int32 amount = 3; + * @return This builder for chaining. + */ + public Builder clearAmount() { + bitField0_ = (bitField0_ & ~0x00000004); + amount_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.PaymentRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.PaymentRequest) + private static final sample.rpc.PaymentRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.PaymentRequest(); + } + + public static sample.rpc.PaymentRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PaymentRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.PaymentRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PaymentRequestOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PaymentRequestOrBuilder.java new file mode 100644 index 00000000..bee10612 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PaymentRequestOrBuilder.java @@ -0,0 +1,33 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface PaymentRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.PaymentRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * string transaction_id = 1; + * @return The transactionId. + */ + java.lang.String getTransactionId(); + /** + * string transaction_id = 1; + * @return The bytes for transactionId. + */ + com.google.protobuf.ByteString + getTransactionIdBytes(); + + /** + * int32 customer_id = 2; + * @return The customerId. 
+ */ + int getCustomerId(); + + /** + * int32 amount = 3; + * @return The amount. + */ + int getAmount(); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderRequest.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderRequest.java new file mode 100644 index 00000000..88754013 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderRequest.java @@ -0,0 +1,793 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.PlaceOrderRequest} + */ +public final class PlaceOrderRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.PlaceOrderRequest) + PlaceOrderRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use PlaceOrderRequest.newBuilder() to construct. + private PlaceOrderRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private PlaceOrderRequest() { + itemOrder_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new PlaceOrderRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.PlaceOrderRequest.class, sample.rpc.PlaceOrderRequest.Builder.class); + } + + public static final int CUSTOMER_ID_FIELD_NUMBER = 1; + private int customerId_ = 0; + /** + * int32 customer_id = 1; + * @return The customerId. 
+ */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + + public static final int ITEM_ORDER_FIELD_NUMBER = 2; + @SuppressWarnings("serial") + private java.util.List itemOrder_; + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + @java.lang.Override + public java.util.List getItemOrderList() { + return itemOrder_; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + @java.lang.Override + public java.util.List + getItemOrderOrBuilderList() { + return itemOrder_; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + @java.lang.Override + public int getItemOrderCount() { + return itemOrder_.size(); + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + @java.lang.Override + public sample.rpc.ItemOrder getItemOrder(int index) { + return itemOrder_.get(index); + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + @java.lang.Override + public sample.rpc.ItemOrderOrBuilder getItemOrderOrBuilder( + int index) { + return itemOrder_.get(index); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (customerId_ != 0) { + output.writeInt32(1, customerId_); + } + for (int i = 0; i < itemOrder_.size(); i++) { + output.writeMessage(2, itemOrder_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (customerId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, customerId_); + } + for (int i = 0; i < itemOrder_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, itemOrder_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.PlaceOrderRequest)) { + return super.equals(obj); + } + sample.rpc.PlaceOrderRequest other = (sample.rpc.PlaceOrderRequest) obj; + + if (getCustomerId() + != other.getCustomerId()) return false; + if (!getItemOrderList() + .equals(other.getItemOrderList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CUSTOMER_ID_FIELD_NUMBER; + hash = (53 * hash) + getCustomerId(); + if (getItemOrderCount() > 0) { + hash = (37 * hash) + ITEM_ORDER_FIELD_NUMBER; + hash = (53 * hash) + getItemOrderList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.PlaceOrderRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PlaceOrderRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PlaceOrderRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PlaceOrderRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PlaceOrderRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PlaceOrderRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PlaceOrderRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.PlaceOrderRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.PlaceOrderRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.PlaceOrderRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.PlaceOrderRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.PlaceOrderRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.PlaceOrderRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.PlaceOrderRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.PlaceOrderRequest) + sample.rpc.PlaceOrderRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.PlaceOrderRequest.class, sample.rpc.PlaceOrderRequest.Builder.class); + } + + // Construct using sample.rpc.PlaceOrderRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + customerId_ = 0; + if (itemOrderBuilder_ == null) { + itemOrder_ = java.util.Collections.emptyList(); + } else { + itemOrder_ = null; + itemOrderBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.PlaceOrderRequest getDefaultInstanceForType() { + return sample.rpc.PlaceOrderRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.PlaceOrderRequest build() { + sample.rpc.PlaceOrderRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.PlaceOrderRequest buildPartial() { + sample.rpc.PlaceOrderRequest result = new sample.rpc.PlaceOrderRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(sample.rpc.PlaceOrderRequest result) { + if (itemOrderBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + itemOrder_ = java.util.Collections.unmodifiableList(itemOrder_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.itemOrder_ = itemOrder_; + } else { + result.itemOrder_ = itemOrderBuilder_.build(); + } + } + + private void buildPartial0(sample.rpc.PlaceOrderRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.customerId_ = customerId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.PlaceOrderRequest) { + return mergeFrom((sample.rpc.PlaceOrderRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.PlaceOrderRequest other) { + if (other == sample.rpc.PlaceOrderRequest.getDefaultInstance()) return this; + if (other.getCustomerId() != 0) { + setCustomerId(other.getCustomerId()); + } + if (itemOrderBuilder_ == null) { + if (!other.itemOrder_.isEmpty()) { + if (itemOrder_.isEmpty()) 
{ + itemOrder_ = other.itemOrder_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureItemOrderIsMutable(); + itemOrder_.addAll(other.itemOrder_); + } + onChanged(); + } + } else { + if (!other.itemOrder_.isEmpty()) { + if (itemOrderBuilder_.isEmpty()) { + itemOrderBuilder_.dispose(); + itemOrderBuilder_ = null; + itemOrder_ = other.itemOrder_; + bitField0_ = (bitField0_ & ~0x00000002); + itemOrderBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getItemOrderFieldBuilder() : null; + } else { + itemOrderBuilder_.addAllMessages(other.itemOrder_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + customerId_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: { + sample.rpc.ItemOrder m = + input.readMessage( + sample.rpc.ItemOrder.parser(), + extensionRegistry); + if (itemOrderBuilder_ == null) { + ensureItemOrderIsMutable(); + itemOrder_.add(m); + } else { + itemOrderBuilder_.addMessage(m); + } + break; + } // case 18 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private int customerId_ ; + /** + * int32 customer_id = 1; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + /** + * int32 customer_id = 1; + * @param value The customerId to set. + * @return This builder for chaining. + */ + public Builder setCustomerId(int value) { + + customerId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * int32 customer_id = 1; + * @return This builder for chaining. 
+ */ + public Builder clearCustomerId() { + bitField0_ = (bitField0_ & ~0x00000001); + customerId_ = 0; + onChanged(); + return this; + } + + private java.util.List itemOrder_ = + java.util.Collections.emptyList(); + private void ensureItemOrderIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + itemOrder_ = new java.util.ArrayList(itemOrder_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.ItemOrder, sample.rpc.ItemOrder.Builder, sample.rpc.ItemOrderOrBuilder> itemOrderBuilder_; + + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public java.util.List getItemOrderList() { + if (itemOrderBuilder_ == null) { + return java.util.Collections.unmodifiableList(itemOrder_); + } else { + return itemOrderBuilder_.getMessageList(); + } + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public int getItemOrderCount() { + if (itemOrderBuilder_ == null) { + return itemOrder_.size(); + } else { + return itemOrderBuilder_.getCount(); + } + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public sample.rpc.ItemOrder getItemOrder(int index) { + if (itemOrderBuilder_ == null) { + return itemOrder_.get(index); + } else { + return itemOrderBuilder_.getMessage(index); + } + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public Builder setItemOrder( + int index, sample.rpc.ItemOrder value) { + if (itemOrderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureItemOrderIsMutable(); + itemOrder_.set(index, value); + onChanged(); + } else { + itemOrderBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public Builder setItemOrder( + int index, sample.rpc.ItemOrder.Builder builderForValue) { + if (itemOrderBuilder_ == null) { + ensureItemOrderIsMutable(); + itemOrder_.set(index, builderForValue.build()); + onChanged(); + } else { + itemOrderBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public Builder addItemOrder(sample.rpc.ItemOrder value) { + if (itemOrderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureItemOrderIsMutable(); + itemOrder_.add(value); + onChanged(); + } else { + itemOrderBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public Builder addItemOrder( + int index, sample.rpc.ItemOrder value) { + if (itemOrderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureItemOrderIsMutable(); + itemOrder_.add(index, value); + onChanged(); + } else { + itemOrderBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public Builder addItemOrder( + sample.rpc.ItemOrder.Builder builderForValue) { + if (itemOrderBuilder_ == null) { + ensureItemOrderIsMutable(); + itemOrder_.add(builderForValue.build()); + onChanged(); + } else { + itemOrderBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public Builder addItemOrder( + int index, sample.rpc.ItemOrder.Builder builderForValue) { + if (itemOrderBuilder_ == null) { + ensureItemOrderIsMutable(); + itemOrder_.add(index, builderForValue.build()); + onChanged(); + } else { + itemOrderBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public 
Builder addAllItemOrder( + java.lang.Iterable values) { + if (itemOrderBuilder_ == null) { + ensureItemOrderIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, itemOrder_); + onChanged(); + } else { + itemOrderBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public Builder clearItemOrder() { + if (itemOrderBuilder_ == null) { + itemOrder_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + itemOrderBuilder_.clear(); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public Builder removeItemOrder(int index) { + if (itemOrderBuilder_ == null) { + ensureItemOrderIsMutable(); + itemOrder_.remove(index); + onChanged(); + } else { + itemOrderBuilder_.remove(index); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public sample.rpc.ItemOrder.Builder getItemOrderBuilder( + int index) { + return getItemOrderFieldBuilder().getBuilder(index); + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public sample.rpc.ItemOrderOrBuilder getItemOrderOrBuilder( + int index) { + if (itemOrderBuilder_ == null) { + return itemOrder_.get(index); } else { + return itemOrderBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public java.util.List + getItemOrderOrBuilderList() { + if (itemOrderBuilder_ != null) { + return itemOrderBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(itemOrder_); + } + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public sample.rpc.ItemOrder.Builder addItemOrderBuilder() { + return getItemOrderFieldBuilder().addBuilder( + sample.rpc.ItemOrder.getDefaultInstance()); + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public sample.rpc.ItemOrder.Builder addItemOrderBuilder( + int index) { + return getItemOrderFieldBuilder().addBuilder( + index, sample.rpc.ItemOrder.getDefaultInstance()); + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public java.util.List + getItemOrderBuilderList() { + return getItemOrderFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.ItemOrder, sample.rpc.ItemOrder.Builder, sample.rpc.ItemOrderOrBuilder> + getItemOrderFieldBuilder() { + if (itemOrderBuilder_ == null) { + itemOrderBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.ItemOrder, sample.rpc.ItemOrder.Builder, sample.rpc.ItemOrderOrBuilder>( + itemOrder_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); + itemOrder_ = null; + } + return itemOrderBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.PlaceOrderRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.PlaceOrderRequest) + private static final sample.rpc.PlaceOrderRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.PlaceOrderRequest(); + } + + public static sample.rpc.PlaceOrderRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new 
com.google.protobuf.AbstractParser() { + @java.lang.Override + public PlaceOrderRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.PlaceOrderRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderRequestOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderRequestOrBuilder.java new file mode 100644 index 00000000..7f996bbf --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderRequestOrBuilder.java @@ -0,0 +1,39 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface PlaceOrderRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.PlaceOrderRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * int32 customer_id = 1; + * @return The customerId. + */ + int getCustomerId(); + + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + java.util.List + getItemOrderList(); + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + sample.rpc.ItemOrder getItemOrder(int index); + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + int getItemOrderCount(); + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + java.util.List + getItemOrderOrBuilderList(); + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + sample.rpc.ItemOrderOrBuilder getItemOrderOrBuilder( + int index); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderResponse.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderResponse.java new file mode 100644 index 00000000..455445ad --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderResponse.java @@ -0,0 +1,509 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.PlaceOrderResponse} + */ +public final class PlaceOrderResponse extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.PlaceOrderResponse) + PlaceOrderResponseOrBuilder { +private static final long serialVersionUID = 0L; + // Use PlaceOrderResponse.newBuilder() to construct. 
+ private PlaceOrderResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private PlaceOrderResponse() { + orderId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new PlaceOrderResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.PlaceOrderResponse.class, sample.rpc.PlaceOrderResponse.Builder.class); + } + + public static final int ORDER_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object orderId_ = ""; + /** + * string order_id = 1; + * @return The orderId. + */ + @java.lang.Override + public java.lang.String getOrderId() { + java.lang.Object ref = orderId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + orderId_ = s; + return s; + } + } + /** + * string order_id = 1; + * @return The bytes for orderId. + */ + @java.lang.Override + public com.google.protobuf.ByteString + getOrderIdBytes() { + java.lang.Object ref = orderId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + orderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(orderId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, orderId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(orderId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, orderId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.PlaceOrderResponse)) { + return super.equals(obj); + } + sample.rpc.PlaceOrderResponse other = (sample.rpc.PlaceOrderResponse) obj; + + if (!getOrderId() + .equals(other.getOrderId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ORDER_ID_FIELD_NUMBER; + hash = (53 * hash) + getOrderId().hashCode(); + hash = (29 * hash) + 
getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.PlaceOrderResponse parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PlaceOrderResponse parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PlaceOrderResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PlaceOrderResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PlaceOrderResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PlaceOrderResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PlaceOrderResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.PlaceOrderResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.PlaceOrderResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.PlaceOrderResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.PlaceOrderResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.PlaceOrderResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.PlaceOrderResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.PlaceOrderResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.PlaceOrderResponse) + sample.rpc.PlaceOrderResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.PlaceOrderResponse.class, sample.rpc.PlaceOrderResponse.Builder.class); + } + + // Construct using sample.rpc.PlaceOrderResponse.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + orderId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderResponse_descriptor; + } + + @java.lang.Override + public sample.rpc.PlaceOrderResponse getDefaultInstanceForType() { + return sample.rpc.PlaceOrderResponse.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.PlaceOrderResponse build() { + sample.rpc.PlaceOrderResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.PlaceOrderResponse buildPartial() { + sample.rpc.PlaceOrderResponse result = new sample.rpc.PlaceOrderResponse(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.PlaceOrderResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.orderId_ = orderId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.PlaceOrderResponse) { + return mergeFrom((sample.rpc.PlaceOrderResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.PlaceOrderResponse other) { + if (other == sample.rpc.PlaceOrderResponse.getDefaultInstance()) return this; + if (!other.getOrderId().isEmpty()) { + orderId_ = other.orderId_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + orderId_ = input.readStringRequireUtf8(); + bitField0_ |= 
0x00000001; + break; + } // case 10 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object orderId_ = ""; + /** + * string order_id = 1; + * @return The orderId. + */ + public java.lang.String getOrderId() { + java.lang.Object ref = orderId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + orderId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string order_id = 1; + * @return The bytes for orderId. + */ + public com.google.protobuf.ByteString + getOrderIdBytes() { + java.lang.Object ref = orderId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + orderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string order_id = 1; + * @param value The orderId to set. + * @return This builder for chaining. + */ + public Builder setOrderId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + orderId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * string order_id = 1; + * @return This builder for chaining. + */ + public Builder clearOrderId() { + orderId_ = getDefaultInstance().getOrderId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * string order_id = 1; + * @param value The bytes for orderId to set. + * @return This builder for chaining. 
+ */ + public Builder setOrderIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + orderId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.PlaceOrderResponse) + } + + // @@protoc_insertion_point(class_scope:rpc.PlaceOrderResponse) + private static final sample.rpc.PlaceOrderResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.PlaceOrderResponse(); + } + + public static sample.rpc.PlaceOrderResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PlaceOrderResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.PlaceOrderResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderResponseOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderResponseOrBuilder.java new file mode 100644 index 00000000..55ee1b9b --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderResponseOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface PlaceOrderResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.PlaceOrderResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * string order_id = 1; + * @return The orderId. + */ + java.lang.String getOrderId(); + /** + * string order_id = 1; + * @return The bytes for orderId. 
+ */ + com.google.protobuf.ByteString + getOrderIdBytes(); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PrepareRequest.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PrepareRequest.java new file mode 100644 index 00000000..c4cab4cd --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PrepareRequest.java @@ -0,0 +1,509 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.PrepareRequest} + */ +public final class PrepareRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.PrepareRequest) + PrepareRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use PrepareRequest.newBuilder() to construct. + private PrepareRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private PrepareRequest() { + transactionId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new PrepareRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_PrepareRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_PrepareRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.PrepareRequest.class, sample.rpc.PrepareRequest.Builder.class); + } + + public static final int TRANSACTION_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + @java.lang.Override + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + @java.lang.Override + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, transactionId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, transactionId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.PrepareRequest)) { + return super.equals(obj); + } + sample.rpc.PrepareRequest other = (sample.rpc.PrepareRequest) obj; + + if (!getTransactionId() + .equals(other.getTransactionId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getTransactionId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.PrepareRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PrepareRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PrepareRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PrepareRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PrepareRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PrepareRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public 
static sample.rpc.PrepareRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.PrepareRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.PrepareRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.PrepareRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.PrepareRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.PrepareRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.PrepareRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.PrepareRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.PrepareRequest) + sample.rpc.PrepareRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_PrepareRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_PrepareRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.PrepareRequest.class, sample.rpc.PrepareRequest.Builder.class); + } + + // Construct using sample.rpc.PrepareRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + transactionId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_PrepareRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.PrepareRequest getDefaultInstanceForType() { + return sample.rpc.PrepareRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.PrepareRequest build() { + sample.rpc.PrepareRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.PrepareRequest buildPartial() { + sample.rpc.PrepareRequest result = new sample.rpc.PrepareRequest(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.PrepareRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.transactionId_ = transactionId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.PrepareRequest) { + return mergeFrom((sample.rpc.PrepareRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.PrepareRequest other) { + if (other == sample.rpc.PrepareRequest.getDefaultInstance()) return this; + if (!other.getTransactionId().isEmpty()) { + transactionId_ = other.transactionId_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + transactionId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: { 
+ if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. + */ + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string transaction_id = 1; + * @param value The transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @return This builder for chaining. + */ + public Builder clearTransactionId() { + transactionId_ = getDefaultInstance().getTransactionId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @param value The bytes for transactionId to set. + * @return This builder for chaining. 
+ */ + public Builder setTransactionIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.PrepareRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.PrepareRequest) + private static final sample.rpc.PrepareRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.PrepareRequest(); + } + + public static sample.rpc.PrepareRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PrepareRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.PrepareRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PrepareRequestOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PrepareRequestOrBuilder.java new file mode 100644 index 00000000..e5648535 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/PrepareRequestOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface PrepareRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.PrepareRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * string transaction_id = 1; + * @return The transactionId. + */ + java.lang.String getTransactionId(); + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + com.google.protobuf.ByteString + getTransactionIdBytes(); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/RepaymentRequest.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/RepaymentRequest.java new file mode 100644 index 00000000..0a037f4d --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/RepaymentRequest.java @@ -0,0 +1,505 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.RepaymentRequest} + */ +public final class RepaymentRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.RepaymentRequest) + RepaymentRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use RepaymentRequest.newBuilder() to construct. + private RepaymentRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private RepaymentRequest() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new RepaymentRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_RepaymentRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_RepaymentRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.RepaymentRequest.class, sample.rpc.RepaymentRequest.Builder.class); + } + + public static final int CUSTOMER_ID_FIELD_NUMBER = 1; + private int customerId_ = 0; + /** + * int32 customer_id = 1; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + + public static final int AMOUNT_FIELD_NUMBER = 2; + private int amount_ = 0; + /** + * int32 amount = 2; + * @return The amount. 
+ */ + @java.lang.Override + public int getAmount() { + return amount_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (customerId_ != 0) { + output.writeInt32(1, customerId_); + } + if (amount_ != 0) { + output.writeInt32(2, amount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (customerId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, customerId_); + } + if (amount_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, amount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.RepaymentRequest)) { + return super.equals(obj); + } + sample.rpc.RepaymentRequest other = (sample.rpc.RepaymentRequest) obj; + + if (getCustomerId() + != other.getCustomerId()) return false; + if (getAmount() + != other.getAmount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CUSTOMER_ID_FIELD_NUMBER; + hash = (53 * hash) + getCustomerId(); + hash = (37 * hash) + AMOUNT_FIELD_NUMBER; + hash = (53 * hash) + getAmount(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.RepaymentRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.RepaymentRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.RepaymentRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.RepaymentRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.RepaymentRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.RepaymentRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.RepaymentRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + 
.parseWithIOException(PARSER, input); + } + public static sample.rpc.RepaymentRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.RepaymentRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.RepaymentRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.RepaymentRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.RepaymentRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.RepaymentRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.RepaymentRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.RepaymentRequest) + sample.rpc.RepaymentRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_RepaymentRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_RepaymentRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.RepaymentRequest.class, sample.rpc.RepaymentRequest.Builder.class); + } + + // Construct using sample.rpc.RepaymentRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + customerId_ = 0; + amount_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_RepaymentRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.RepaymentRequest getDefaultInstanceForType() { + return sample.rpc.RepaymentRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.RepaymentRequest build() { + sample.rpc.RepaymentRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.RepaymentRequest buildPartial() { + sample.rpc.RepaymentRequest result = new sample.rpc.RepaymentRequest(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.RepaymentRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.customerId_ = customerId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.amount_ = amount_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.RepaymentRequest) { + return mergeFrom((sample.rpc.RepaymentRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.RepaymentRequest other) { + if (other == sample.rpc.RepaymentRequest.getDefaultInstance()) return this; + if (other.getCustomerId() != 0) { + setCustomerId(other.getCustomerId()); + } + if (other.getAmount() != 0) { + setAmount(other.getAmount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = 
true; + break; + case 8: { + customerId_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 16: { + amount_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private int customerId_ ; + /** + * int32 customer_id = 1; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + /** + * int32 customer_id = 1; + * @param value The customerId to set. + * @return This builder for chaining. + */ + public Builder setCustomerId(int value) { + + customerId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * int32 customer_id = 1; + * @return This builder for chaining. + */ + public Builder clearCustomerId() { + bitField0_ = (bitField0_ & ~0x00000001); + customerId_ = 0; + onChanged(); + return this; + } + + private int amount_ ; + /** + * int32 amount = 2; + * @return The amount. + */ + @java.lang.Override + public int getAmount() { + return amount_; + } + /** + * int32 amount = 2; + * @param value The amount to set. + * @return This builder for chaining. + */ + public Builder setAmount(int value) { + + amount_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * int32 amount = 2; + * @return This builder for chaining. + */ + public Builder clearAmount() { + bitField0_ = (bitField0_ & ~0x00000002); + amount_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.RepaymentRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.RepaymentRequest) + private static final sample.rpc.RepaymentRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.RepaymentRequest(); + } + + public static sample.rpc.RepaymentRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RepaymentRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.RepaymentRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/RepaymentRequestOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/RepaymentRequestOrBuilder.java new file mode 100644 index 00000000..c373e5c3 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/RepaymentRequestOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface RepaymentRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.RepaymentRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * int32 customer_id = 1; + * @return The customerId. + */ + int getCustomerId(); + + /** + * int32 amount = 2; + * @return The amount. + */ + int getAmount(); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/RollbackRequest.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/RollbackRequest.java new file mode 100644 index 00000000..710b30ff --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/RollbackRequest.java @@ -0,0 +1,509 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.RollbackRequest} + */ +public final class RollbackRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.RollbackRequest) + RollbackRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use RollbackRequest.newBuilder() to construct. + private RollbackRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private RollbackRequest() { + transactionId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new RollbackRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_RollbackRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_RollbackRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.RollbackRequest.class, sample.rpc.RollbackRequest.Builder.class); + } + + public static final int TRANSACTION_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + @java.lang.Override + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + @java.lang.Override + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, transactionId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, transactionId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.RollbackRequest)) { + return super.equals(obj); + } + sample.rpc.RollbackRequest other = (sample.rpc.RollbackRequest) obj; + + if (!getTransactionId() + .equals(other.getTransactionId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getTransactionId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.RollbackRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.RollbackRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.RollbackRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.RollbackRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.RollbackRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.RollbackRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + 
public static sample.rpc.RollbackRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.RollbackRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.RollbackRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.RollbackRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.RollbackRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.RollbackRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.RollbackRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.RollbackRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.RollbackRequest) + sample.rpc.RollbackRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_RollbackRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_RollbackRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.RollbackRequest.class, sample.rpc.RollbackRequest.Builder.class); + } + + // Construct using sample.rpc.RollbackRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + transactionId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_RollbackRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.RollbackRequest getDefaultInstanceForType() { + return sample.rpc.RollbackRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.RollbackRequest build() { + sample.rpc.RollbackRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.RollbackRequest buildPartial() { + sample.rpc.RollbackRequest result = new sample.rpc.RollbackRequest(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.RollbackRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.transactionId_ = transactionId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.RollbackRequest) { + return mergeFrom((sample.rpc.RollbackRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.RollbackRequest other) { + if (other == sample.rpc.RollbackRequest.getDefaultInstance()) return this; + if (!other.getTransactionId().isEmpty()) { + transactionId_ = other.transactionId_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + transactionId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // 
case 10 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. + */ + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string transaction_id = 1; + * @param value The transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @return This builder for chaining. + */ + public Builder clearTransactionId() { + transactionId_ = getDefaultInstance().getTransactionId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @param value The bytes for transactionId to set. + * @return This builder for chaining. 
+ */ + public Builder setTransactionIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.RollbackRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.RollbackRequest) + private static final sample.rpc.RollbackRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.RollbackRequest(); + } + + public static sample.rpc.RollbackRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RollbackRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.RollbackRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/RollbackRequestOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/RollbackRequestOrBuilder.java new file mode 100644 index 00000000..b06ab5da --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/RollbackRequestOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface RollbackRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.RollbackRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * string transaction_id = 1; + * @return The transactionId. + */ + java.lang.String getTransactionId(); + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + com.google.protobuf.ByteString + getTransactionIdBytes(); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/Sample.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/Sample.java new file mode 100644 index 00000000..1f528b54 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/Sample.java @@ -0,0 +1,269 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public final class Sample { + private Sample() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_ItemOrder_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_ItemOrder_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_PlaceOrderRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_PlaceOrderRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_PlaceOrderResponse_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_PlaceOrderResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_Order_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_Order_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_Statement_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_Statement_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_GetOrderRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_GetOrderRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_GetOrderResponse_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_GetOrderResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_GetOrdersRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_GetOrdersRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_GetOrdersResponse_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_GetOrdersResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_GetCustomerInfoRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_GetCustomerInfoRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_GetCustomerInfoResponse_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_rpc_GetCustomerInfoResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_PaymentRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_PaymentRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_RepaymentRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_RepaymentRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_PrepareRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_PrepareRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_ValidateRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_ValidateRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_CommitRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_CommitRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_RollbackRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_RollbackRequest_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\014sample.proto\022\003rpc\032\033google/protobuf/emp" + + "ty.proto\"+\n\tItemOrder\022\017\n\007item_id\030\001 \001(\005\022\r" + + "\n\005count\030\002 \001(\005\"L\n\021PlaceOrderRequest\022\023\n\013cu" + + "stomer_id\030\001 \001(\005\022\"\n\nitem_order\030\002 \003(\0132\016.rp" + + "c.ItemOrder\"&\n\022PlaceOrderResponse\022\020\n\010ord" + + "er_id\030\001 \001(\t\"\212\001\n\005Order\022\020\n\010order_id\030\001 \001(\t\022" + + "\021\n\ttimestamp\030\002 \001(\003\022\023\n\013customer_id\030\003 \001(\005\022" + + "\025\n\rcustomer_name\030\004 \001(\t\022!\n\tstatement\030\005 \003(" + + "\0132\016.rpc.Statement\022\r\n\005total\030\006 \001(\005\"\\\n\tStat" + + "ement\022\017\n\007item_id\030\001 \001(\005\022\021\n\titem_name\030\002 \001(" + + "\t\022\r\n\005price\030\003 \001(\005\022\r\n\005count\030\004 \001(\005\022\r\n\005total" + + "\030\005 \001(\005\"#\n\017GetOrderRequest\022\020\n\010order_id\030\001 " + + "\001(\t\"-\n\020GetOrderResponse\022\031\n\005order\030\001 \001(\0132\n" + + ".rpc.Order\"\'\n\020GetOrdersRequest\022\023\n\013custom" + + "er_id\030\001 \001(\005\".\n\021GetOrdersResponse\022\031\n\005orde" + + "r\030\001 \003(\0132\n.rpc.Order\"]\n\026GetCustomerInfoRe" + + "quest\022\033\n\016transaction_id\030\001 \001(\tH\000\210\001\001\022\023\n\013cu" + + "stomer_id\030\002 \001(\005B\021\n\017_transaction_id\"_\n\027Ge" + + "tCustomerInfoResponse\022\n\n\002id\030\001 \001(\005\022\014\n\004nam" + + "e\030\002 \001(\t\022\024\n\014credit_limit\030\003 \001(\005\022\024\n\014credit_" + + "total\030\004 \001(\005\"M\n\016PaymentRequest\022\026\n\016transac" + + "tion_id\030\001 \001(\t\022\023\n\013customer_id\030\002 \001(\005\022\016\n\006am" + + "ount\030\003 \001(\005\"7\n\020RepaymentRequest\022\023\n\013custom" + + 
"er_id\030\001 \001(\005\022\016\n\006amount\030\002 \001(\005\"(\n\016PrepareRe" + + "quest\022\026\n\016transaction_id\030\001 \001(\t\")\n\017Validat" + + "eRequest\022\026\n\016transaction_id\030\001 \001(\t\"\'\n\rComm" + + "itRequest\022\026\n\016transaction_id\030\001 \001(\t\")\n\017Rol" + + "lbackRequest\022\026\n\016transaction_id\030\001 \001(\t2\310\001\n" + + "\014OrderService\022?\n\nPlaceOrder\022\026.rpc.PlaceO" + + "rderRequest\032\027.rpc.PlaceOrderResponse\"\000\0229" + + "\n\010GetOrder\022\024.rpc.GetOrderRequest\032\025.rpc.G" + + "etOrderResponse\"\000\022<\n\tGetOrders\022\025.rpc.Get" + + "OrdersRequest\032\026.rpc.GetOrdersResponse\"\0002" + + "\303\003\n\017CustomerService\022N\n\017GetCustomerInfo\022\033" + + ".rpc.GetCustomerInfoRequest\032\034.rpc.GetCus" + + "tomerInfoResponse\"\000\022<\n\tRepayment\022\025.rpc.R" + + "epaymentRequest\032\026.google.protobuf.Empty\"" + + "\000\0228\n\007Payment\022\023.rpc.PaymentRequest\032\026.goog" + + "le.protobuf.Empty\"\000\0228\n\007Prepare\022\023.rpc.Pre" + + "pareRequest\032\026.google.protobuf.Empty\"\000\022:\n" + + "\010Validate\022\024.rpc.ValidateRequest\032\026.google" + + ".protobuf.Empty\"\000\0226\n\006Commit\022\022.rpc.Commit" + + "Request\032\026.google.protobuf.Empty\"\000\022:\n\010Rol" + + "lback\022\024.rpc.RollbackRequest\032\026.google.pro" + + "tobuf.Empty\"\000B\026\n\nsample.rpcB\006SampleP\001b\006p" + + "roto3" + }; + descriptor = com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.protobuf.EmptyProto.getDescriptor(), + }); + internal_static_rpc_ItemOrder_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_rpc_ItemOrder_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_ItemOrder_descriptor, + new java.lang.String[] { "ItemId", "Count", }); + internal_static_rpc_PlaceOrderRequest_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_rpc_PlaceOrderRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_PlaceOrderRequest_descriptor, + new java.lang.String[] { "CustomerId", "ItemOrder", }); + internal_static_rpc_PlaceOrderResponse_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_rpc_PlaceOrderResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_PlaceOrderResponse_descriptor, + new java.lang.String[] { "OrderId", }); + internal_static_rpc_Order_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_rpc_Order_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_Order_descriptor, + new java.lang.String[] { "OrderId", "Timestamp", "CustomerId", "CustomerName", "Statement", "Total", }); + internal_static_rpc_Statement_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_rpc_Statement_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_Statement_descriptor, + new java.lang.String[] { "ItemId", "ItemName", "Price", "Count", "Total", }); + internal_static_rpc_GetOrderRequest_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_rpc_GetOrderRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + 
internal_static_rpc_GetOrderRequest_descriptor, + new java.lang.String[] { "OrderId", }); + internal_static_rpc_GetOrderResponse_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_rpc_GetOrderResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_GetOrderResponse_descriptor, + new java.lang.String[] { "Order", }); + internal_static_rpc_GetOrdersRequest_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_rpc_GetOrdersRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_GetOrdersRequest_descriptor, + new java.lang.String[] { "CustomerId", }); + internal_static_rpc_GetOrdersResponse_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_rpc_GetOrdersResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_GetOrdersResponse_descriptor, + new java.lang.String[] { "Order", }); + internal_static_rpc_GetCustomerInfoRequest_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_rpc_GetCustomerInfoRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_GetCustomerInfoRequest_descriptor, + new java.lang.String[] { "TransactionId", "CustomerId", "TransactionId", }); + internal_static_rpc_GetCustomerInfoResponse_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_rpc_GetCustomerInfoResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_GetCustomerInfoResponse_descriptor, + new java.lang.String[] { "Id", "Name", "CreditLimit", "CreditTotal", }); + internal_static_rpc_PaymentRequest_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_rpc_PaymentRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_PaymentRequest_descriptor, + new java.lang.String[] { "TransactionId", "CustomerId", "Amount", }); + internal_static_rpc_RepaymentRequest_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_rpc_RepaymentRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_RepaymentRequest_descriptor, + new java.lang.String[] { "CustomerId", "Amount", }); + internal_static_rpc_PrepareRequest_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_rpc_PrepareRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_PrepareRequest_descriptor, + new java.lang.String[] { "TransactionId", }); + internal_static_rpc_ValidateRequest_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_rpc_ValidateRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_ValidateRequest_descriptor, + new java.lang.String[] { "TransactionId", }); + internal_static_rpc_CommitRequest_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_rpc_CommitRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_CommitRequest_descriptor, + new java.lang.String[] { "TransactionId", }); + internal_static_rpc_RollbackRequest_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_rpc_RollbackRequest_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_RollbackRequest_descriptor, + new java.lang.String[] { "TransactionId", }); + com.google.protobuf.EmptyProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/Statement.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/Statement.java new file mode 100644 index 00000000..164cde9b --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/Statement.java @@ -0,0 +1,773 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.Statement} + */ +public final class Statement extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.Statement) + StatementOrBuilder { +private static final long serialVersionUID = 0L; + // Use Statement.newBuilder() to construct. + private Statement(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private Statement() { + itemName_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new Statement(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_Statement_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_Statement_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.Statement.class, sample.rpc.Statement.Builder.class); + } + + public static final int ITEM_ID_FIELD_NUMBER = 1; + private int itemId_ = 0; + /** + * int32 item_id = 1; + * @return The itemId. + */ + @java.lang.Override + public int getItemId() { + return itemId_; + } + + public static final int ITEM_NAME_FIELD_NUMBER = 2; + @SuppressWarnings("serial") + private volatile java.lang.Object itemName_ = ""; + /** + * string item_name = 2; + * @return The itemName. + */ + @java.lang.Override + public java.lang.String getItemName() { + java.lang.Object ref = itemName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + itemName_ = s; + return s; + } + } + /** + * string item_name = 2; + * @return The bytes for itemName. + */ + @java.lang.Override + public com.google.protobuf.ByteString + getItemNameBytes() { + java.lang.Object ref = itemName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + itemName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PRICE_FIELD_NUMBER = 3; + private int price_ = 0; + /** + * int32 price = 3; + * @return The price. + */ + @java.lang.Override + public int getPrice() { + return price_; + } + + public static final int COUNT_FIELD_NUMBER = 4; + private int count_ = 0; + /** + * int32 count = 4; + * @return The count. 
+ */ + @java.lang.Override + public int getCount() { + return count_; + } + + public static final int TOTAL_FIELD_NUMBER = 5; + private int total_ = 0; + /** + * int32 total = 5; + * @return The total. + */ + @java.lang.Override + public int getTotal() { + return total_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (itemId_ != 0) { + output.writeInt32(1, itemId_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(itemName_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, itemName_); + } + if (price_ != 0) { + output.writeInt32(3, price_); + } + if (count_ != 0) { + output.writeInt32(4, count_); + } + if (total_ != 0) { + output.writeInt32(5, total_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (itemId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, itemId_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(itemName_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, itemName_); + } + if (price_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(3, price_); + } + if (count_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(4, count_); + } + if (total_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(5, total_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.Statement)) { + return super.equals(obj); + } + sample.rpc.Statement other = (sample.rpc.Statement) obj; + + if (getItemId() + != other.getItemId()) return false; + if (!getItemName() + .equals(other.getItemName())) return false; + if (getPrice() + != other.getPrice()) return false; + if (getCount() + != other.getCount()) return false; + if (getTotal() + != other.getTotal()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ITEM_ID_FIELD_NUMBER; + hash = (53 * hash) + getItemId(); + hash = (37 * hash) + ITEM_NAME_FIELD_NUMBER; + hash = (53 * hash) + getItemName().hashCode(); + hash = (37 * hash) + PRICE_FIELD_NUMBER; + hash = (53 * hash) + getPrice(); + hash = (37 * hash) + COUNT_FIELD_NUMBER; + hash = (53 * hash) + getCount(); + hash = (37 * hash) + TOTAL_FIELD_NUMBER; + hash = (53 * hash) + getTotal(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.Statement parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.Statement parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.Statement parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.Statement parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.Statement parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.Statement parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.Statement parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.Statement parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.Statement parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.Statement parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.Statement parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.Statement parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.Statement prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.Statement} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.Statement) + sample.rpc.StatementOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_Statement_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_Statement_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.Statement.class, sample.rpc.Statement.Builder.class); + } + + // Construct using sample.rpc.Statement.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + itemId_ = 0; + itemName_ = ""; + price_ = 0; + count_ = 0; + total_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_Statement_descriptor; + } + + @java.lang.Override + public sample.rpc.Statement getDefaultInstanceForType() { + return sample.rpc.Statement.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.Statement build() { + sample.rpc.Statement result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.Statement buildPartial() { + sample.rpc.Statement result = new sample.rpc.Statement(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.Statement result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.itemId_ = itemId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.itemName_ = itemName_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.price_ = price_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.count_ = count_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.total_ = total_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.Statement) { + return mergeFrom((sample.rpc.Statement)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.Statement other) { + if (other == sample.rpc.Statement.getDefaultInstance()) return this; + if (other.getItemId() != 0) { + setItemId(other.getItemId()); + } + if (!other.getItemName().isEmpty()) { + itemName_ = other.itemName_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getPrice() != 0) { + setPrice(other.getPrice()); + } + if (other.getCount() != 0) { + setCount(other.getCount()); + } + if (other.getTotal() != 0) { + setTotal(other.getTotal()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder 
mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + itemId_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: { + itemName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: { + price_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: { + count_ = input.readInt32(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 40: { + total_ = input.readInt32(); + bitField0_ |= 0x00000010; + break; + } // case 40 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private int itemId_ ; + /** + * int32 item_id = 1; + * @return The itemId. + */ + @java.lang.Override + public int getItemId() { + return itemId_; + } + /** + * int32 item_id = 1; + * @param value The itemId to set. + * @return This builder for chaining. + */ + public Builder setItemId(int value) { + + itemId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * int32 item_id = 1; + * @return This builder for chaining. + */ + public Builder clearItemId() { + bitField0_ = (bitField0_ & ~0x00000001); + itemId_ = 0; + onChanged(); + return this; + } + + private java.lang.Object itemName_ = ""; + /** + * string item_name = 2; + * @return The itemName. + */ + public java.lang.String getItemName() { + java.lang.Object ref = itemName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + itemName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string item_name = 2; + * @return The bytes for itemName. + */ + public com.google.protobuf.ByteString + getItemNameBytes() { + java.lang.Object ref = itemName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + itemName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string item_name = 2; + * @param value The itemName to set. + * @return This builder for chaining. + */ + public Builder setItemName( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + itemName_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * string item_name = 2; + * @return This builder for chaining. + */ + public Builder clearItemName() { + itemName_ = getDefaultInstance().getItemName(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * string item_name = 2; + * @param value The bytes for itemName to set. + * @return This builder for chaining. 
+ */ + public Builder setItemNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + itemName_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private int price_ ; + /** + * int32 price = 3; + * @return The price. + */ + @java.lang.Override + public int getPrice() { + return price_; + } + /** + * int32 price = 3; + * @param value The price to set. + * @return This builder for chaining. + */ + public Builder setPrice(int value) { + + price_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * int32 price = 3; + * @return This builder for chaining. + */ + public Builder clearPrice() { + bitField0_ = (bitField0_ & ~0x00000004); + price_ = 0; + onChanged(); + return this; + } + + private int count_ ; + /** + * int32 count = 4; + * @return The count. + */ + @java.lang.Override + public int getCount() { + return count_; + } + /** + * int32 count = 4; + * @param value The count to set. + * @return This builder for chaining. + */ + public Builder setCount(int value) { + + count_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * int32 count = 4; + * @return This builder for chaining. + */ + public Builder clearCount() { + bitField0_ = (bitField0_ & ~0x00000008); + count_ = 0; + onChanged(); + return this; + } + + private int total_ ; + /** + * int32 total = 5; + * @return The total. + */ + @java.lang.Override + public int getTotal() { + return total_; + } + /** + * int32 total = 5; + * @param value The total to set. + * @return This builder for chaining. + */ + public Builder setTotal(int value) { + + total_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + /** + * int32 total = 5; + * @return This builder for chaining. 
+ */ + public Builder clearTotal() { + bitField0_ = (bitField0_ & ~0x00000010); + total_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.Statement) + } + + // @@protoc_insertion_point(class_scope:rpc.Statement) + private static final sample.rpc.Statement DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.Statement(); + } + + public static sample.rpc.Statement getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Statement parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.Statement getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/StatementOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/StatementOrBuilder.java new file mode 100644 index 00000000..2ac1a841 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/StatementOrBuilder.java @@ -0,0 +1,45 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface StatementOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.Statement) + com.google.protobuf.MessageOrBuilder { + + /** + * int32 item_id = 1; + * @return The itemId. + */ + int getItemId(); + + /** + * string item_name = 2; + * @return The itemName. + */ + java.lang.String getItemName(); + /** + * string item_name = 2; + * @return The bytes for itemName. + */ + com.google.protobuf.ByteString + getItemNameBytes(); + + /** + * int32 price = 3; + * @return The price. + */ + int getPrice(); + + /** + * int32 count = 4; + * @return The count. + */ + int getCount(); + + /** + * int32 total = 5; + * @return The total. 
+ */ + int getTotal(); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/ValidateRequest.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/ValidateRequest.java new file mode 100644 index 00000000..448a8d98 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/ValidateRequest.java @@ -0,0 +1,509 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.ValidateRequest} + */ +public final class ValidateRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.ValidateRequest) + ValidateRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use ValidateRequest.newBuilder() to construct. + private ValidateRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ValidateRequest() { + transactionId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new ValidateRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_ValidateRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_ValidateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.ValidateRequest.class, sample.rpc.ValidateRequest.Builder.class); + } + + public static final int TRANSACTION_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + @java.lang.Override + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + @java.lang.Override + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, transactionId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, transactionId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.ValidateRequest)) { + return super.equals(obj); + } + sample.rpc.ValidateRequest other = (sample.rpc.ValidateRequest) obj; + + if (!getTransactionId() + .equals(other.getTransactionId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getTransactionId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.ValidateRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.ValidateRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.ValidateRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.ValidateRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.ValidateRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.ValidateRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + 
public static sample.rpc.ValidateRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.ValidateRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.ValidateRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.ValidateRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.ValidateRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.ValidateRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.ValidateRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.ValidateRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.ValidateRequest) + sample.rpc.ValidateRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_ValidateRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_ValidateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.ValidateRequest.class, sample.rpc.ValidateRequest.Builder.class); + } + + // Construct using sample.rpc.ValidateRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + transactionId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_ValidateRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.ValidateRequest getDefaultInstanceForType() { + return sample.rpc.ValidateRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.ValidateRequest build() { + sample.rpc.ValidateRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.ValidateRequest buildPartial() { + sample.rpc.ValidateRequest result = new sample.rpc.ValidateRequest(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.ValidateRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.transactionId_ = transactionId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.ValidateRequest) { + return mergeFrom((sample.rpc.ValidateRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.ValidateRequest other) { + if (other == sample.rpc.ValidateRequest.getDefaultInstance()) return this; + if (!other.getTransactionId().isEmpty()) { + transactionId_ = other.transactionId_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + transactionId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // 
case 10 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. + */ + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string transaction_id = 1; + * @param value The transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @return This builder for chaining. + */ + public Builder clearTransactionId() { + transactionId_ = getDefaultInstance().getTransactionId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @param value The bytes for transactionId to set. + * @return This builder for chaining. 
+ */ + public Builder setTransactionIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.ValidateRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.ValidateRequest) + private static final sample.rpc.ValidateRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.ValidateRequest(); + } + + public static sample.rpc.ValidateRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ValidateRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.ValidateRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/ValidateRequestOrBuilder.java b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/ValidateRequestOrBuilder.java new file mode 100644 index 00000000..3934c2d2 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/java/sample/rpc/ValidateRequestOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface ValidateRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.ValidateRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * string transaction_id = 1; + * @return The transactionId. + */ + java.lang.String getTransactionId(); + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + com.google.protobuf.ByteString + getTransactionIdBytes(); +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/proto/sample.proto b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/proto/sample.proto new file mode 100644 index 00000000..98065367 --- /dev/null +++ b/docs/3.12/scalardb-samples/microservice-transaction-sample/rpc/src/main/proto/sample.proto @@ -0,0 +1,136 @@ +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "sample.rpc"; +option java_outer_classname = "Sample"; + +package rpc; + +import "google/protobuf/empty.proto"; + +// for Order Service +service OrderService { + // Place an order. It's a transaction that spans OrderService and CustomerService + rpc PlaceOrder(PlaceOrderRequest) returns (PlaceOrderResponse) { + } + // Get Order information by order ID + rpc GetOrder(GetOrderRequest) returns (GetOrderResponse) { + } + // Get Order information by customer ID + rpc GetOrders(GetOrdersRequest) returns (GetOrdersResponse) { + } +} + +message ItemOrder { + int32 item_id = 1; + int32 count = 2; +} + +message PlaceOrderRequest { + int32 customer_id = 1; + repeated ItemOrder item_order = 2; +} + +message PlaceOrderResponse { + string order_id = 1; +} + +message Order { + string order_id = 1; + int64 timestamp = 2; + int32 customer_id = 3; + string customer_name = 4; + repeated Statement statement = 5; + int32 total = 6; +} + +message Statement { + int32 item_id = 1; + string item_name = 2; + int32 price = 3; + int32 count = 4; + int32 total = 5; +} + +message GetOrderRequest { + string order_id = 1; +} + +message GetOrderResponse { + Order order = 1; +} + +message GetOrdersRequest { + int32 customer_id = 1; +} + +message GetOrdersResponse { + repeated Order order = 1; +} + +// for Customer Service +service CustomerService { + // Get customer information + rpc GetCustomerInfo(GetCustomerInfoRequest) returns (GetCustomerInfoResponse) { + } + // Credit card repayment + rpc Repayment(RepaymentRequest) returns (google.protobuf.Empty) { + } + + // RPCs for two-phase commit transactions + + // Credit card payment + rpc Payment(PaymentRequest) returns (google.protobuf.Empty) { + } + // Prepare the transaction + rpc Prepare(PrepareRequest) returns (google.protobuf.Empty) { + } + // Validate the transaction + rpc Validate(ValidateRequest) returns (google.protobuf.Empty) { + } + // Commit the transaction + rpc Commit(CommitRequest) returns (google.protobuf.Empty) { + } + // Rollback the transaction + rpc Rollback(RollbackRequest) returns (google.protobuf.Empty) { + } +} + +message GetCustomerInfoRequest { + optional string transaction_id = 1; + int32 customer_id = 2; +} + +message GetCustomerInfoResponse { + int32 id = 1; + string name = 2; + int32 credit_limit = 3; + int32 credit_total = 4; +} + +message PaymentRequest { + string transaction_id = 1; + int32 customer_id = 2; + int32 amount = 3; +} + +message RepaymentRequest { + int32 customer_id = 1; + int32 amount = 2; +} + +message PrepareRequest { + string transaction_id = 1; +} + +message ValidateRequest { + string transaction_id = 1; +} + +message CommitRequest { + string transaction_id = 1; +} + +message RollbackRequest { + string transaction_id = 1; +} diff --git a/docs/3.12/scalardb-samples/microservice-transaction-sample/settings.gradle b/docs/3.12/scalardb-samples/microservice-transaction-sample/settings.gradle new file mode 100644 index 00000000..26dbd9e9 --- /dev/null +++ 
b/docs/3.12/scalardb-samples/microservice-transaction-sample/settings.gradle @@ -0,0 +1,6 @@ +rootProject.name = 'microservice-transaction-sample' +include 'customer-service' +include 'order-service' +include 'rpc' +include 'client' + diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/README.md b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/README.md new file mode 100644 index 00000000..b1e3d787 --- /dev/null +++ b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/README.md @@ -0,0 +1,318 @@ +# Create a Sample Application That Supports Multi-Storage Transactions + +This tutorial describes how to create a sample application that supports the multi-storage transactions feature in ScalarDB. + +## Overview + +The sample e-commerce application shows how users can order and pay for items by using a line of credit. The use case described in this tutorial is the same as the basic [ScalarDB sample](../scalardb-sample/README.md) but takes advantage of the [multi-storage transactions](https://github.com/scalar-labs/scalardb/blob/master/docs/multi-storage-transactions.md) feature in ScalarDB. + +In this tutorial, you will build an application that uses both Cassandra and MySQL. By using the multi-storage transactions feature in ScalarDB, you can execute a transaction that spans both Cassandra and MySQL. + +![Overview](images/overview.png) + +{% capture notice--info %} +**Note** + +Since the focus of the sample application is to demonstrate using ScalarDB, application-specific error handling, authentication processing, and similar functions are not included in the sample application. For details about exception handling in ScalarDB, see [How to handle exceptions](https://github.com/scalar-labs/scalardb/blob/master/docs/api-guide.md#how-to-handle-exceptions). +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
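+To give a concrete sense of what the multi-storage transactions feature looks like in code, the following is a minimal sketch adapted from the sample's `Sample.java` (included in full later in this diff). The customer ID and the `"example-order-id"` value are placeholders, and imports and the `manager` setup are omitted; see the full source for the complete flow:
+
+```java
+// One ScalarDB transaction reads MySQL-backed data and writes Cassandra-backed data.
+DistributedTransaction tx = manager.start();
+try {
+  // Read from the "customer" namespace, which database.properties maps to MySQL.
+  Optional<Result> customer =
+      tx.get(
+          Get.newBuilder()
+              .namespace("customer")
+              .table("customers")
+              .partitionKey(Key.ofInt("customer_id", 1))
+              .build());
+  // (The full sample uses customer.get() here to check the credit limit.)
+
+  // Write to the "order" namespace, which database.properties maps to Cassandra.
+  tx.put(
+      Put.newBuilder()
+          .namespace("order")
+          .table("orders")
+          .partitionKey(Key.ofInt("customer_id", 1))
+          .clusteringKey(Key.ofBigInt("timestamp", System.currentTimeMillis()))
+          .textValue("order_id", "example-order-id")
+          .build());
+
+  // Both writes commit atomically, or neither takes effect.
+  tx.commit();
+} catch (Exception e) {
+  tx.abort(); // Roll back the whole cross-storage transaction on failure.
+  throw e;
+}
+```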
+ +### What you can do in this sample application + +The sample application supports the following types of transactions: + +- Get customer information. +- Place an order by using a line of credit. + - Checks if the cost of the order is below the customer's credit limit. + - If the check passes, records the order history and updates the amount the customer has spent. +- Get order information by order ID. +- Get order information by customer ID. +- Make a payment. + - Reduces the amount the customer has spent. + +## Prerequisites + +- One of the following Java Development Kits (JDKs): + - [Oracle JDK](https://www.oracle.com/java/technologies/downloads/) LTS version (8, 11, or 17) + - [OpenJDK](https://openjdk.org/install/) LTS version (8, 11, or 17) +- [Docker](https://www.docker.com/get-started/) 20.10 or later with [Docker Compose](https://docs.docker.com/compose/install/) V2 or later + +{% capture notice--info %} +**Note** + +We recommend using the LTS versions mentioned above, but other non-LTS versions may work. + +In addition, other JDKs should work with ScalarDB, but we haven't tested them. +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +## Set up ScalarDB + +The following sections describe how to set up the sample application that supports the multi-storage transactions feature in ScalarDB. + +### Clone the ScalarDB samples repository + +Open **Terminal**, then clone the ScalarDB samples repository by running the following command: + +```console +$ git clone https://github.com/scalar-labs/scalardb-samples +``` + +Then, go to the directory that contains the sample application by running the following command: + +```console +$ cd scalardb-samples/multi-storage-transaction-sample +``` + +### Start Cassandra and MySQL + +Cassandra and MySQL are already configured for the sample application, as shown in [`database.properties`](database.properties). For details about configuring the multi-storage transactions feature in ScalarDB, see [How to configure ScalarDB to support multi-storage transactions](https://github.com/scalar-labs/scalardb/blob/master/docs/multi-storage-transactions.md#how-to-configure-scalardb-to-support-multi-storage-transactions). + +To start Cassandra and MySQL, which are included in the Docker container for the sample application, make sure Docker is running and then run the following command: + +```console +$ docker-compose up -d +``` + +{% capture notice--info %} +**Note** + +Starting the Docker container may take more than one minute depending on your development environment. +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
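+For reference, the multi-storage settings in [`database.properties`](database.properties) (the full file appears later in this diff) map the `customer` namespace to MySQL and the `order` namespace, together with the Coordinator tables, to Cassandra:
+
+```properties
+scalar.db.storage=multi-storage
+scalar.db.multi_storage.storages=cassandra,mysql
+scalar.db.multi_storage.namespace_mapping=customer:mysql,order:cassandra,coordinator:cassandra
+scalar.db.multi_storage.default_storage=cassandra
+```
+
+Each storage also has its own connection settings (contact points and credentials) in the same file.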
+
+### Load the schema
+
+The database schema (the method in which the data will be organized) for the sample application has already been defined in [`schema.json`](schema.json).
+
+To apply the schema, go to the [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases) page and download the ScalarDB Schema Loader that matches the version of ScalarDB that you want to use to the `scalardb-samples/multi-storage-transaction-sample` folder.
+
+Then, run the following command, replacing `<VERSION>` with the version of the ScalarDB Schema Loader that you downloaded:
+
+```console
+$ java -jar scalardb-schema-loader-<VERSION>.jar --config database.properties --schema-file schema.json --coordinator
+```
+
+#### Schema details
+
+As shown in [`schema.json`](schema.json) for the sample application, all the tables are created in the `customer` and `order` namespaces.
+
+- `customer.customers`: a table that manages customers' information
+  - `credit_limit`: the maximum amount of money a lender will allow each customer to spend when using a line of credit
+  - `credit_total`: the amount of money that each customer has already spent by using their line of credit
+- `order.orders`: a table that manages order information
+- `order.statements`: a table that manages order statement information
+- `order.items`: a table that manages information of items to be ordered
+
+The Entity Relationship Diagram for the schema is as follows:
+
+![ERD](images/ERD.png)
+
+### Load the initial data
+
+After the Docker container has started, load the initial data by running the following command:
+
+```console
+$ ./gradlew run --args="LoadInitialData"
+```
+
+After the initial data has loaded, the following records should be stored in the tables.
+
+**`customer.customers` table**
+
+| customer_id | name          | credit_limit | credit_total |
+|-------------|---------------|--------------|--------------|
+| 1           | Yamada Taro   | 10000        | 0            |
+| 2           | Yamada Hanako | 10000        | 0            |
+| 3           | Suzuki Ichiro | 10000        | 0            |
+
+**`order.items` table**
+
+| item_id | name   | price |
+|---------|--------|-------|
+| 1       | Apple  | 1000  |
+| 2       | Orange | 2000  |
+| 3       | Grape  | 2500  |
+| 4       | Mango  | 5000  |
+| 5       | Melon  | 3000  |
+
+## Execute transactions and retrieve data in the sample application
+
+The following sections describe how to execute transactions and retrieve data in the sample e-commerce application.
+
+### Get customer information
+
+Start with getting information about the customer whose ID is `1` by running the following command:
+
+```console
+$ ./gradlew run --args="GetCustomerInfo 1"
+```
+
+You should see the following output:
+
+```console
+...
+{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 0}
+...
+```
+
+### Place an order
+
+Then, have customer ID `1` place an order for three apples and two oranges by running the following command:
+
+{% capture notice--info %}
+**Note**
+
+The order format in this command is `./gradlew run --args="PlaceOrder <customer_id> <item_id>:<count>,<item_id>:<count>,..."`.
+{% endcapture %}
+
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+
+```console
+$ ./gradlew run --args="PlaceOrder 1 1:3,2:2"
+```
+
+You should see output similar to the following, with a different UUID for `order_id`, which confirms that the order was successful:
+
+```console
+...
+{"order_id": "dea4964a-ff50-4ecf-9201-027981a1566e"}
+...
+```
+
+### Check order details
+
+Check details about the order by running the following command, replacing `<order_id>` with the UUID for the `order_id` that was shown after running the previous command:
+
+```console
+$ ./gradlew run --args="GetOrder <order_id>"
+```
+
+You should see output similar to the following, with different UUIDs for `order_id` and `timestamp`:
+
+```console
+...
+{"order": {"order_id": "dea4964a-ff50-4ecf-9201-027981a1566e","timestamp": 1650948340914,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 1,"item_name": "Apple","price": 1000,"count": 3,"total": 3000},{"item_id": 2,"item_name": "Orange","price": 2000,"count": 2,"total": 4000}],"total": 7000}}
+...
+```
+
+### Place another order
+
+Place an order for one melon, which uses up the remaining amount in `credit_total` for customer ID `1`, by running the following command:
+
+```console
+$ ./gradlew run --args="PlaceOrder 1 5:1"
+```
+
+You should see output similar to the following, with a different UUID for `order_id`, which confirms that the order was successful:
+
+```console
+...
+{"order_id": "bcc34150-91fa-4bea-83db-d2dbe6f0f30d"}
+...
+```
+
+### Check order history
+
+Get the history of all orders for customer ID `1` by running the following command:
+
+```console
+$ ./gradlew run --args="GetOrders 1"
+```
+
+You should see output similar to the following, with different UUIDs for `order_id` and `timestamp`, which shows the history of all orders for customer ID `1` in descending order by timestamp:
+
+```console
+...
+{"order": [{"order_id": "dea4964a-ff50-4ecf-9201-027981a1566e","timestamp": 1650948340914,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 1,"item_name": "Apple","price": 1000,"count": 3,"total": 3000},{"item_id": 2,"item_name": "Orange","price": 2000,"count": 2,"total": 4000}],"total": 7000},{"order_id": "bcc34150-91fa-4bea-83db-d2dbe6f0f30d","timestamp": 1650948412766,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 5,"item_name": "Melon","price": 3000,"count": 1,"total": 3000}],"total": 3000}]}
+...
+```
+
+### Check credit total
+
+Get the credit total for customer ID `1` by running the following command:
+
+```console
+$ ./gradlew run --args="GetCustomerInfo 1"
+```
+
+You should see the following output, which shows that `credit_total` for customer ID `1` has reached the `credit_limit`, so the customer cannot place any more orders:
+
+```console
+...
+{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 10000}
+...
+```
+
+Try to place an order for one grape and one mango by running the following command:
+
+```console
+$ ./gradlew run --args="PlaceOrder 1 3:1,4:1"
+```
+
+You should see the following output, which shows that the order failed because the `credit_total` amount would exceed the `credit_limit` amount:
+
+```console
+...
+java.lang.RuntimeException: Credit limit exceeded
+    at sample.Sample.placeOrder(Sample.java:205)
+    at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:33)
+    at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:8)
+    at picocli.CommandLine.executeUserObject(CommandLine.java:1783)
+    at picocli.CommandLine.access$900(CommandLine.java:145)
+    at picocli.CommandLine$RunLast.handle(CommandLine.java:2141)
+    at picocli.CommandLine$RunLast.handle(CommandLine.java:2108)
+    at picocli.CommandLine$AbstractParseResultHandler.execute(CommandLine.java:1975)
+    at picocli.CommandLine.execute(CommandLine.java:1904)
+    at sample.command.SampleCommand.main(SampleCommand.java:35)
+...
+```
+
+### Make a payment
+
+To continue making orders, customer ID `1` must make a payment to reduce the `credit_total` amount.
+
+Make a payment by running the following command:
+
+```console
+$ ./gradlew run --args="Repayment 1 8000"
+```
+
+Then, check the `credit_total` amount for customer ID `1` by running the following command:
+
+```console
+$ ./gradlew run --args="GetCustomerInfo 1"
+```
+
+You should see the following output, which shows that a payment was applied to customer ID `1`, reducing the `credit_total` amount:
+
+```console
+...
+{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 2000}
+...
+```
+
+Now that customer ID `1` has made a payment, place an order for one grape and one mango by running the following command:
+
+```console
+$ ./gradlew run --args="PlaceOrder 1 3:1,4:1"
+```
+
+You should see output similar to the following, with a different UUID for `order_id`, which confirms that the order was successful:
+
+```console
+...
+{"order_id": "8911cab3-1c2b-4322-9386-adb1c024e078"}
+...
+```
+
+## Stop the sample application
+
+To stop the sample application, stop the Docker container by running the following command:
+
+```console
+$ docker-compose down
+```
diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/build.gradle b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/build.gradle
new file mode 100644
index 00000000..528aa916
--- /dev/null
+++ b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/build.gradle
@@ -0,0 +1,25 @@
+plugins {
+    id 'java'
+    id 'application'
+}
+
+group 'org.sample'
+version '1.0-SNAPSHOT'
+
+repositories {
+    mavenCentral()
+}
+
+dependencies {
+    implementation 'com.scalar-labs:scalardb:3.9.0'
+    implementation 'info.picocli:picocli:4.7.1'
+}
+
+application {
+    mainClassName = 'sample.command.SampleCommand'
+}
+
+archivesBaseName = "sample"
+
+sourceCompatibility = 1.8
+targetCompatibility = 1.8
diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/database.properties b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/database.properties
new file mode 100644
index 00000000..f41a8636
--- /dev/null
+++ b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/database.properties
@@ -0,0 +1,12 @@
+scalar.db.storage=multi-storage
+scalar.db.multi_storage.storages=cassandra,mysql
+scalar.db.multi_storage.storages.cassandra.storage=cassandra
+scalar.db.multi_storage.storages.cassandra.contact_points=localhost
+scalar.db.multi_storage.storages.cassandra.username=cassandra
+scalar.db.multi_storage.storages.cassandra.password=cassandra
+scalar.db.multi_storage.storages.mysql.storage=jdbc
+scalar.db.multi_storage.storages.mysql.contact_points=jdbc:mysql://localhost:3306/
+scalar.db.multi_storage.storages.mysql.username=root
+scalar.db.multi_storage.storages.mysql.password=mysql
+scalar.db.multi_storage.namespace_mapping=customer:mysql,order:cassandra,coordinator:cassandra +scalar.db.multi_storage.default_storage=cassandra diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/docker-compose.yml b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/docker-compose.yml new file mode 100644 index 00000000..4fcc7553 --- /dev/null +++ b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/docker-compose.yml @@ -0,0 +1,14 @@ +version: "3.5" +services: + mysql: + image: mysql:8.0 + environment: + MYSQL_ROOT_PASSWORD: mysql + container_name: "mysql-1" + ports: + - "3306:3306" + cassandra: + image: cassandra:3.11 + container_name: "cassandra-1" + ports: + - "9042:9042" diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/gradle/wrapper/gradle-wrapper.jar b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 00000000..7454180f Binary files /dev/null and b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/gradle/wrapper/gradle-wrapper.jar differ diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/gradle/wrapper/gradle-wrapper.properties b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000..070cb702 --- /dev/null +++ b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-7.6-bin.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/gradlew b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/gradlew new file mode 100755 index 00000000..744e882e --- /dev/null +++ b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/gradlew @@ -0,0 +1,185 @@ +#!/usr/bin/env sh + +# +# Copyright 2015 the original author or authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Use the maximum available, or set MAX_FD != -1 to use that value. 
+MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MSYS* | MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? -ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin or MSYS, switch paths to Windows format before running java +if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=`expr $i + 1` + done + case $i in + 0) set -- ;; + 1) set -- "$args0" ;; + 2) set -- "$args0" "$args1" ;; + 3) set -- "$args0" "$args1" "$args2" ;; + 4) set -- "$args0" "$args1" "$args2" "$args3" ;; + 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save 
() { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=`save "$@"` + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +exec "$JAVACMD" "$@" diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/gradlew.bat b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/gradlew.bat new file mode 100644 index 00000000..107acd32 --- /dev/null +++ b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/gradlew.bat @@ -0,0 +1,89 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/images/ERD.png b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/images/ERD.png new file mode 100644 index 00000000..02100437 Binary files /dev/null and b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/images/ERD.png differ diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/images/overview.png b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/images/overview.png new file mode 100644 index 00000000..16749f3b Binary files /dev/null and b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/images/overview.png differ diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/schema.json b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/schema.json new file mode 100644 index 00000000..d5cbf601 --- /dev/null +++ b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/schema.json @@ -0,0 +1,56 @@ +{ + "customer.customers": { + "transaction": true, + "partition-key": [ + "customer_id" + ], + "columns": { + "customer_id": "INT", + "name": "TEXT", + "credit_limit": "INT", + "credit_total": "INT" + } + }, + "order.orders": { + "transaction": true, + "partition-key": [ + "customer_id" + ], + "clustering-key": [ + "timestamp" + ], + "secondary-index": [ + "order_id" + ], + "columns": { + "order_id": "TEXT", + "customer_id": "INT", + "timestamp": "BIGINT" + } + }, + "order.statements": { + "transaction": true, + "partition-key": [ + "order_id" + ], + "clustering-key": [ + "item_id" + ], + "columns": { + "order_id": "TEXT", + "item_id": "INT", + "count": "INT" + } + }, + "order.items": { + "transaction": true, + "partition-key": [ + "item_id" + ], + "columns": { + "item_id": "INT", + "name": "TEXT", + "price": "INT" + } + } +} diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/settings.gradle b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/settings.gradle new file mode 100644 index 00000000..1c51e286 --- /dev/null +++ b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/settings.gradle @@ -0,0 +1 @@ +rootProject.name = 'multi-storage-transaction-sample' diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/Sample.java b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/Sample.java new file mode 100644 index 00000000..9458347a --- /dev/null +++ b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/Sample.java @@ -0,0 +1,415 @@ +package sample; + +import com.scalar.db.api.DistributedTransaction; +import com.scalar.db.api.DistributedTransactionManager; +import com.scalar.db.api.Get; +import com.scalar.db.api.Put; +import com.scalar.db.api.Result; +import com.scalar.db.api.Scan; +import com.scalar.db.exception.transaction.TransactionException; +import com.scalar.db.io.Key; +import com.scalar.db.service.TransactionFactory; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.UUID; + +public class Sample implements AutoCloseable { + + private final DistributedTransactionManager manager; + + public Sample() throws IOException { + // Create a transaction manager object + TransactionFactory factory = TransactionFactory.create("database.properties"); + manager = factory.getTransactionManager(); + } + + public void 
loadInitialData() throws TransactionException { + DistributedTransaction transaction = null; + try { + transaction = manager.start(); + loadCustomerIfNotExists(transaction, 1, "Yamada Taro", 10000, 0); + loadCustomerIfNotExists(transaction, 2, "Yamada Hanako", 10000, 0); + loadCustomerIfNotExists(transaction, 3, "Suzuki Ichiro", 10000, 0); + loadItemIfNotExists(transaction, 1, "Apple", 1000); + loadItemIfNotExists(transaction, 2, "Orange", 2000); + loadItemIfNotExists(transaction, 3, "Grape", 2500); + loadItemIfNotExists(transaction, 4, "Mango", 5000); + loadItemIfNotExists(transaction, 5, "Melon", 3000); + transaction.commit(); + } catch (TransactionException e) { + if (transaction != null) { + // If an error occurs, abort the transaction + transaction.abort(); + } + throw e; + } + } + + private void loadCustomerIfNotExists( + DistributedTransaction transaction, + int customerId, + String name, + int creditLimit, + int creditTotal) + throws TransactionException { + Optional customer = + transaction.get( + Get.newBuilder() + .namespace("customer") + .table("customers") + .partitionKey(Key.ofInt("customer_id", customerId)) + .build()); + if (!customer.isPresent()) { + transaction.put( + Put.newBuilder() + .namespace("customer") + .table("customers") + .partitionKey(Key.ofInt("customer_id", customerId)) + .textValue("name", name) + .intValue("credit_limit", creditLimit) + .intValue("credit_total", creditTotal) + .build()); + } + } + + private void loadItemIfNotExists( + DistributedTransaction transaction, int itemId, String name, int price) + throws TransactionException { + Optional item = + transaction.get( + Get.newBuilder() + .namespace("order") + .table("items") + .partitionKey(Key.ofInt("item_id", itemId)) + .build()); + if (!item.isPresent()) { + transaction.put( + Put.newBuilder() + .namespace("order") + .table("items") + .partitionKey(Key.ofInt("item_id", itemId)) + .textValue("name", name) + .intValue("price", price) + .build()); + } + } + + public String getCustomerInfo(int customerId) throws TransactionException { + DistributedTransaction transaction = null; + try { + // Start a transaction + transaction = manager.start(); + + // Retrieve the customer info for the specified customer ID from the customers table + Optional customer = + transaction.get( + Get.newBuilder() + .namespace("customer") + .table("customers") + .partitionKey(Key.ofInt("customer_id", customerId)) + .build()); + + if (!customer.isPresent()) { + // If the customer info the specified customer ID doesn't exist, throw an exception + throw new RuntimeException("Customer not found"); + } + + // Commit the transaction (even when the transaction is read-only, we need to commit) + transaction.commit(); + + // Return the customer info as a JSON format + return String.format( + "{\"id\": %d, \"name\": \"%s\", \"credit_limit\": %d, \"credit_total\": %d}", + customerId, + customer.get().getText("name"), + customer.get().getInt("credit_limit"), + customer.get().getInt("credit_total")); + } catch (Exception e) { + if (transaction != null) { + // If an error occurs, abort the transaction + transaction.abort(); + } + throw e; + } + } + + public String placeOrder(int customerId, int[] itemIds, int[] itemCounts) + throws TransactionException { + assert itemIds.length == itemCounts.length; + + DistributedTransaction transaction = null; + try { + String orderId = UUID.randomUUID().toString(); + + // Start a transaction + transaction = manager.start(); + + // Put the order info into the orders table + transaction.put( + 
Put.newBuilder() + .namespace("order") + .table("orders") + .partitionKey(Key.ofInt("customer_id", customerId)) + .clusteringKey(Key.ofBigInt("timestamp", System.currentTimeMillis())) + .textValue("order_id", orderId) + .build()); + + int amount = 0; + for (int i = 0; i < itemIds.length; i++) { + int itemId = itemIds[i]; + int count = itemCounts[i]; + + // Put the order statement into the statements table + transaction.put( + Put.newBuilder() + .namespace("order") + .table("statements") + .partitionKey(Key.ofText("order_id", orderId)) + .clusteringKey(Key.ofInt("item_id", itemId)) + .intValue("count", count) + .build()); + + // Retrieve the item info from the items table + Optional item = + transaction.get( + Get.newBuilder() + .namespace("order") + .table("items") + .partitionKey(Key.ofInt("item_id", itemId)) + .build()); + + if (!item.isPresent()) { + throw new RuntimeException("Item not found"); + } + + // Calculate the total amount + amount += item.get().getInt("price") * count; + } + + // Check if the credit total exceeds the credit limit after payment + Optional customer = + transaction.get( + Get.newBuilder() + .namespace("customer") + .table("customers") + .partitionKey(Key.ofInt("customer_id", customerId)) + .build()); + if (!customer.isPresent()) { + throw new RuntimeException("Customer not found"); + } + int creditLimit = customer.get().getInt("credit_limit"); + int creditTotal = customer.get().getInt("credit_total"); + if (creditTotal + amount > creditLimit) { + throw new RuntimeException("Credit limit exceeded"); + } + + // Update credit_total for the customer + transaction.put( + Put.newBuilder() + .namespace("customer") + .table("customers") + .partitionKey(Key.ofInt("customer_id", customerId)) + .intValue("credit_total", creditTotal + amount) + .build()); + + // Commit the transaction + transaction.commit(); + + // Return the order id + return String.format("{\"order_id\": \"%s\"}", orderId); + } catch (Exception e) { + if (transaction != null) { + // If an error occurs, abort the transaction + transaction.abort(); + } + throw e; + } + } + + private String getOrderJson(DistributedTransaction transaction, String orderId) + throws TransactionException { + // Retrieve the order info for the order ID from the orders table + Optional order = + transaction.get( + Get.newBuilder() + .namespace("order") + .table("orders") + .indexKey(Key.ofText("order_id", orderId)) + .build()); + + if (!order.isPresent()) { + throw new RuntimeException("Order not found"); + } + + int customerId = order.get().getInt("customer_id"); + + // Retrieve the customer info for the specified customer ID from the customers table + Optional customer = + transaction.get( + Get.newBuilder() + .namespace("customer") + .table("customers") + .partitionKey(Key.ofInt("customer_id", customerId)) + .build()); + assert customer.isPresent(); + + // Retrieve the order statements for the order ID from the statements table + List statements = + transaction.scan( + Scan.newBuilder() + .namespace("order") + .table("statements") + .partitionKey(Key.ofText("order_id", orderId)) + .build()); + + // Make the statements JSONs + List statementJsons = new ArrayList<>(); + int total = 0; + for (Result statement : statements) { + int itemId = statement.getInt("item_id"); + + // Retrieve the item data from the items table + Optional item = + transaction.get( + Get.newBuilder() + .namespace("order") + .table("items") + .partitionKey(Key.ofInt("item_id", itemId)) + .build()); + + if (!item.isPresent()) { + throw new 
RuntimeException("Item not found"); + } + + int price = item.get().getInt("price"); + int count = statement.getInt("count"); + + statementJsons.add( + String.format( + "{\"item_id\": %d,\"item_name\": \"%s\",\"price\": %d,\"count\": %d,\"total\": %d}", + itemId, item.get().getText("name"), price, count, price * count)); + + total += price * count; + } + + // Return the order info as a JSON format + return String.format( + "{\"order_id\": \"%s\",\"timestamp\": %d,\"customer_id\": %d,\"customer_name\": \"%s\",\"statement\": [%s],\"total\": %d}", + orderId, + order.get().getBigInt("timestamp"), + customerId, + customer.get().getText("name"), + String.join(",", statementJsons), + total); + } + + public String getOrderByOrderId(String orderId) throws TransactionException { + DistributedTransaction transaction = null; + try { + // Start a transaction + transaction = manager.start(); + + // Get an order JSON for the specified order ID + String orderJson = getOrderJson(transaction, orderId); + + // Commit the transaction (even when the transaction is read-only, we need to commit) + transaction.commit(); + + // Return the order info as a JSON format + return String.format("{\"order\": %s}", orderJson); + } catch (Exception e) { + if (transaction != null) { + // If an error occurs, abort the transaction + transaction.abort(); + } + throw e; + } + } + + public String getOrdersByCustomerId(int customerId) throws TransactionException { + DistributedTransaction transaction = null; + try { + // Start a transaction + transaction = manager.start(); + + // Retrieve the order info for the customer ID from the orders table + List orders = + transaction.scan( + Scan.newBuilder() + .namespace("order") + .table("orders") + .partitionKey(Key.ofInt("customer_id", customerId)) + .build()); + + // Make order JSONs for the orders of the customer + List orderJsons = new ArrayList<>(); + for (Result order : orders) { + orderJsons.add(getOrderJson(transaction, order.getText("order_id"))); + } + + // Commit the transaction (even when the transaction is read-only, we need to commit) + transaction.commit(); + + // Return the order info as a JSON format + return String.format("{\"order\": [%s]}", String.join(",", orderJsons)); + } catch (Exception e) { + if (transaction != null) { + // If an error occurs, abort the transaction + transaction.abort(); + } + throw e; + } + } + + public void repayment(int customerId, int amount) throws TransactionException { + DistributedTransaction transaction = null; + try { + // Start a transaction + transaction = manager.start(); + + // Retrieve the customer info for the specified customer ID from the customers table + Optional customer = + transaction.get( + Get.newBuilder() + .namespace("customer") + .table("customers") + .partitionKey(Key.ofInt("customer_id", customerId)) + .build()); + if (!customer.isPresent()) { + throw new RuntimeException("Customer not found"); + } + + int updatedCreditTotal = customer.get().getInt("credit_total") - amount; + + // Check if over repayment or not + if (updatedCreditTotal < 0) { + throw new RuntimeException("Over repayment"); + } + + // Reduce credit_total for the customer + transaction.put( + Put.newBuilder() + .namespace("customer") + .table("customers") + .partitionKey(Key.ofInt("customer_id", customerId)) + .intValue("credit_total", updatedCreditTotal) + .build()); + + // Commit the transaction + transaction.commit(); + } catch (Exception e) { + if (transaction != null) { + // If an error occurs, abort the transaction + transaction.abort(); + } + 
throw e; + } + } + + @Override + public void close() { + manager.close(); + } +} diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/GetCustomerInfoCommand.java b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/GetCustomerInfoCommand.java new file mode 100644 index 00000000..8c397e12 --- /dev/null +++ b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/GetCustomerInfoCommand.java @@ -0,0 +1,21 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.Sample; + +@Command(name = "GetCustomerInfo", description = "Get customer information") +public class GetCustomerInfoCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Override + public Integer call() throws Exception { + try (Sample sample = new Sample()) { + System.out.println(sample.getCustomerInfo(customerId)); + } + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/GetOrderCommand.java b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/GetOrderCommand.java new file mode 100644 index 00000000..abc94537 --- /dev/null +++ b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/GetOrderCommand.java @@ -0,0 +1,21 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.Sample; + +@Command(name = "GetOrder", description = "Get order information by order ID") +public class GetOrderCommand implements Callable { + + @Parameters(index = "0", paramLabel = "ORDER_ID", description = "order ID") + private String orderId; + + @Override + public Integer call() throws Exception { + try (Sample sample = new Sample()) { + System.out.println(sample.getOrderByOrderId(orderId)); + } + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/GetOrdersCommand.java b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/GetOrdersCommand.java new file mode 100644 index 00000000..428a1a83 --- /dev/null +++ b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/GetOrdersCommand.java @@ -0,0 +1,21 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.Sample; + +@Command(name = "GetOrders", description = "Get order information by customer ID") +public class GetOrdersCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Override + public Integer call() throws Exception { + try (Sample sample = new Sample()) { + System.out.println(sample.getOrdersByCustomerId(customerId)); + } + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/LoadInitialDataCommand.java b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/LoadInitialDataCommand.java new file mode 100644 index 00000000..32f9088e --- /dev/null +++ 
b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/LoadInitialDataCommand.java @@ -0,0 +1,17 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import sample.Sample; + +@Command(name = "LoadInitialData", description = "Load initial data") +public class LoadInitialDataCommand implements Callable { + + @Override + public Integer call() throws Exception { + try (Sample sample = new Sample()) { + sample.loadInitialData(); + } + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/PlaceOrderCommand.java b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/PlaceOrderCommand.java new file mode 100644 index 00000000..929b50b4 --- /dev/null +++ b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/PlaceOrderCommand.java @@ -0,0 +1,38 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.Sample; + +@Command(name = "PlaceOrder", description = "Place an order") +public class PlaceOrderCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Parameters( + index = "1", + paramLabel = "ORDERS", + description = "orders. The format is \":,:,...\"") + private String orders; + + @Override + public Integer call() throws Exception { + String[] split = orders.split(",", -1); + int[] itemIds = new int[split.length]; + int[] itemCounts = new int[split.length]; + + for (int i = 0; i < split.length; i++) { + String[] s = split[i].split(":", -1); + itemIds[i] = Integer.parseInt(s[0]); + itemCounts[i] = Integer.parseInt(s[1]); + } + + try (Sample sample = new Sample()) { + System.out.println(sample.placeOrder(customerId, itemIds, itemCounts)); + } + + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/RepaymentCommand.java b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/RepaymentCommand.java new file mode 100644 index 00000000..868b1748 --- /dev/null +++ b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/RepaymentCommand.java @@ -0,0 +1,24 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.Sample; + +@Command(name = "Repayment", description = "Repayment") +public class RepaymentCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Parameters(index = "1", paramLabel = "AMOUNT", description = "amount of the money for repayment") + private int amount; + + @Override + public Integer call() throws Exception { + try (Sample sample = new Sample()) { + sample.repayment(customerId, amount); + } + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/SampleCommand.java b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/SampleCommand.java new file mode 100644 index 00000000..0dfdf690 --- /dev/null +++ b/docs/3.12/scalardb-samples/multi-storage-transaction-sample/src/main/java/sample/command/SampleCommand.java @@ -0,0 +1,37 @@ +package sample.command; + 
+import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Option; + +@Command( + name = "bin/sample", + description = "Sample application for Microservice Transaction", + subcommands = { + LoadInitialDataCommand.class, + PlaceOrderCommand.class, + GetOrderCommand.class, + GetOrdersCommand.class, + GetCustomerInfoCommand.class, + RepaymentCommand.class + }) +public class SampleCommand implements Runnable { + + @Option( + names = {"-h", "--help"}, + usageHelp = true, + description = "Displays this help message and quits.", + defaultValue = "true") + private Boolean showHelp; + + @Override + public void run() { + if (showHelp) { + CommandLine.usage(this, System.out); + } + } + + public static void main(String[] args) { + new CommandLine(new SampleCommand()).execute(args); + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/README.md b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/README.md new file mode 100644 index 00000000..511b3bf9 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/README.md @@ -0,0 +1,303 @@ +# Run Analytical Queries on Sample Data by Using ScalarDB Analytics with PostgreSQL + +This tutorial describes how to run analytical queries on sample data by using ScalarDB Analytics with PostgreSQL. + +## Overview + +This sample tutorial shows how you can run two types of queries: a single-table query and a multi-table query. + +### What you can do in this sample tutorial + +This sample tutorial shows how you can run the following types of queries: + +- Read data and calculate summaries. +- Join tables that span multiple storages. + +{% capture notice--info %} +**Note** + +You can run any arbitrary query that PostgreSQL supports on the imported tables in this sample tutorial. Since ScalarDB Analytics with PostgreSQL supports all queries that PostgreSQL supports, you can use not only join, aggregation, filtering, and ordering as shown in the example, but also the window function, lateral join, or various analytical operations. + +To see which types of queries PostgreSQL supports, see the [PostgreSQL documentation](https://www.postgresql.org/docs/current/index.html). +{% endcapture %} + +
{{ notice--info | markdownify }}
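+
+As a minimal sketch of such an analytical operation, the following hypothetical query uses a window function over the `cassandrans.lineitem` table described later in this tutorial. It assumes the sample data has already been loaded as part of the setup:
+
+```console
+-- Show each line item's price next to the total price of its order (window function).
+SELECT
+  l_orderkey,
+  l_linenumber,
+  l_extendedprice,
+  sum(l_extendedprice) OVER (PARTITION BY l_orderkey) AS order_total_price
+FROM
+  cassandrans.lineitem
+ORDER BY
+  l_orderkey,
+  l_linenumber
+LIMIT 10;
+```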
+ +## Prerequisites + +- [Docker](https://www.docker.com/get-started/) 20.10 or later with [Docker Compose](https://docs.docker.com/compose/install/) V2 or later +- [psql](https://www.postgresql.org/docs/current/app-psql.html) + +## Set up ScalarDB Analytics with PostgreSQL + +First, you must set up the database to run analytical queries with ScalarDB Analytics with PostgreSQL. If you haven't set up the database yet, please follow the instructions in [Getting Started](https://scalardb.scalar-labs.com/docs/latest/scalardb-analytics-postgresql/getting-started). + +### Schema details in ScalarDB + +In this sample tutorial, you have tables with the following schema in the ScalarDB database: + +```mermaid +erDiagram + "dynamons.customer" ||--|{ "postgresns.orders" : "custkey" + "dynamons.customer" { + int c_custkey + text c_name + text c_address + int c_nationkey + text c_phone + double c_acctbal + text c_mktsegment + text c_comment + } + "postgresns.orders" ||--|{ "cassandrans.lineitem" : "orderkey" + "postgresns.orders" { + int o_orderkey + int o_custkey + text o_orderstatus + double o_totalprice + text o_orderdate + text o_orderpriority + text o_clerk + int o_shippriority + text o_comment + } + "cassandrans.lineitem" { + int l_orderkey + int l_partkey + int l_suppkey + int l_linenumber + double l_quantity + double l_extendedprice + double l_discount + double l_tax + text l_returnflag + text l_linestatus + text l_shipdate + text l_commitdate + text l_receiptdate + text l_shipinstruct + text l_shipmode + text l_comment + } +``` + +For reference, this diagram shows the following: + +- `dynamons`, `postgresns`, and `cassandrans`. Namespaces that are mapped to the back-end storages of DynamoDB, PostgreSQL, and Cassandra, respectively. +- `dynamons.customer`. A table that represents information about customers. This table includes attributes like customer key, name, address, phone number, and account balance. +- `postgresns.orders`. A table that contains information about orders that customers have placed. This table includes attributes like order key, customer key, order status, order date, and order priority. +- `cassandrans.lineitem`. A table that represents line items associated with orders. This table includes attributes such as order key, part key, supplier key, quantity, price, and shipping date. + +### Schema details in PostgreSQL + +By running the Schema Importer when setting up ScalarDB, you can import the table schema in the ScalarDB database into the PostgreSQL database. More precisely, for each `namespace_name.table_name` table in the ScalarDB database, you will have a foreign table for `namespace_name._table_name` and a view for `namespace_name.table_name` in the PostgreSQL database. + +The created foreign table contains columns that are identical to the ScalarDB table and the transaction metadata columns that ScalarDB manages internally. Since the created view is defined to exclude the transaction metadata columns from the foreign table, the created view contains only the same columns as the ScalarDB table. + +You can find the schema of the ScalarDB tables in `schema.json`. 
For example, the `dynamons.customer` table is defined as follows: + +```json + "dynamons.customer": { + "transaction": true, + "partition-key": [ + "c_custkey" + ], + "columns": { + "c_custkey": "INT", + "c_name": "TEXT", + "c_address": "TEXT", + "c_nationkey": "INT", + "c_phone": "TEXT", + "c_acctbal": "DOUBLE", + "c_mktsegment": "TEXT", + "c_comment": "TEXT" + } + }, +``` + +To see the foreign table for `dynamons._customer` in the PostgreSQL database, run the following command and enter your PostgreSQL user password when prompted: + +```console +$ psql -U postgres -h localhost test -c '\d dynamons._customer'; +``` + +After entering your password, you should see the following output, which shows the same `c_` columns as in the `dynamons.customer` table: + +```console + Foreign table "dynamons._customer" + Column | Type | Collation | Nullable | Default | FDW options +------------------------+------------------+-----------+----------+---------+------------- + c_custkey | integer | | | | + c_name | text | | | | + c_address | text | | | | + c_nationkey | integer | | | | + c_phone | text | | | | + c_acctbal | double precision | | | | + c_mktsegment | text | | | | + c_comment | text | | | | + tx_id | text | | | | + tx_version | integer | | | | + tx_state | integer | | | | + tx_prepared_at | bigint | | | | + tx_committed_at | bigint | | | | + before_tx_id | text | | | | + before_tx_version | integer | | | | + before_tx_state | integer | | | | + before_tx_prepared_at | bigint | | | | + before_tx_committed_at | bigint | | | | + before_c_name | text | | | | + before_c_address | text | | | | + before_c_nationkey | integer | | | | + before_c_phone | text | | | | + before_c_acctbal | double precision | | | | + before_c_mktsegment | text | | | | + before_c_comment | text | | | | +Server: multi_storage_dynamodb +FDW options: (namespace 'dynamons', table_name 'customer') +``` + +As you can see in the foreign table, the table also contains the transaction metadata columns. These columns are required to ensure the Read Committed isolation level. + +To see the view for `dynamons.customer`, run the following command and enter your PostgreSQL user password when prompted: + +```console +$ psql -U postgres -h localhost test -c '\d dynamons.customer'; +``` + +After entering your password, you should see the following output: + +```console + View "dynamons.customer" + Column | Type | Collation | Nullable | Default +--------------+------------------+-----------+----------+--------- + c_custkey | integer | | | + c_name | text | | | + c_address | text | | | + c_nationkey | integer | | | + c_phone | text | | | + c_acctbal | double precision | | | + c_mktsegment | text | | | + c_comment | text | | | +``` + +The column definitions in this view are the same as the original table in the ScalarDB database. This view is created based on the foreign table explained above to expose only the valid data with the Read Committed isolation level by interpreting the transaction metadata columns. + +{% capture notice--info %} +**Note** + +Normally, you don't need to access the foreign tables directly. Instead, you can equate the views with the tables in the ScalarDB database. +{% endcapture %} + +
{{ notice--info | markdownify }}
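+
+For example, the following sketch, which uses the `dynamons.customer` schema shown above, reads through the view rather than the foreign table, so only committed data is returned and the transaction metadata columns stay hidden:
+
+```console
+-- Read through the view; the tx_* and before_* columns of the foreign table are not exposed here.
+SELECT c_custkey, c_name, c_acctbal
+FROM dynamons.customer
+ORDER BY c_custkey
+LIMIT 5;
+```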
+ +For details about type mapping between ScalarDB and PostgreSQL, see [Data-type mapping between ScalarDB and other databases](https://scalardb.scalar-labs.com/docs/latest/schema-loader/#data-type-mapping-between-scalardb-and-other-databases). + +## Run analytical queries + +The following sections describe how to read data, calculate summaries, and join tables that span multiple storages. + +### Read data and calculate summaries + +You can run a query that reads data from `cassandrans.lineitem`, with the actual data stored in the Cassandra back-end, and calculates several summaries of the ordered line items by aggregating the data. + +To run the query, log in to the psql terminal by running the following command: + +```console +$ psql -U postgres -h localhost test +``` + +After entering your password, enter the following query into the psql terminal: + +```console +SELECT + l_returnflag, + l_linestatus, + sum(l_quantity) AS sum_qty, + sum(l_extendedprice) AS sum_base_price, + sum(l_extendedprice * (1 - l_discount)) AS sum_disc_price, + sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) AS sum_charge, + avg(l_quantity) AS avg_qty, + avg(l_extendedprice) AS avg_price, + avg(l_discount) AS avg_disc, + count(*) AS count_order +FROM + cassandrans.lineitem +WHERE + to_date(l_shipdate, 'YYYY-MM-DD') <= date '1998-12-01' - 3 +GROUP BY + l_returnflag, + l_linestatus +ORDER BY + l_returnflag, + l_linestatus; +``` + +You should see the following output: + +```console + l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order +--------------+--------------+---------+--------------------+--------------------+--------------------+---------------------+--------------------+---------------------+------------- + A | F | 1519 | 2374824.6560430005 | 1387363.5818635763 | 1962762.9341866106 | 26.6491228070175439 | 41663.590456894744 | 0.4150182982456142 | 57 + N | F | 98 | 146371.22954200002 | 85593.92837883368 | 121041.52567369482 | 32.6666666666666667 | 48790.409847333336 | 0.4098473333333333 | 3 + N | O | 5374 | 8007373.247144971 | 4685645.630765834 | 6624209.157932242 | 24.4272727272727273 | 36397.15112338623 | 0.414759749999999 | 220 + R | F | 1461 | 2190869.967642001 | 1284177.8484816086 | 1814150.7929095028 | 25.1896551724137931 | 37773.62013175864 | 0.41323520689655185 | 58 +(4 rows) +``` + +### Join tables that span multiple storages + +You can also run a query to join tables that are connected to the three back-end storages and calculate the unshipped orders with the highest revenue on a particular date. 
+ +To run the query, log in to the psql terminal by running the following command: + +```console +$ psql -U postgres -h localhost test +``` + +After entering your password, enter the following query into the psql terminal: + +```console +SELECT + l_orderkey, + sum(l_extendedprice * (1 - l_discount)) AS revenue, + o_orderdate, + o_shippriority +FROM + dynamons.customer, + postgresns.orders, + cassandrans.lineitem +WHERE + c_mktsegment = 'AUTOMOBILE' + AND c_custkey = o_custkey + AND l_orderkey = o_orderkey + AND o_orderdate < '1995-03-15' + AND l_shipdate > '1995-03-15' +GROUP BY + l_orderkey, + o_orderdate, + o_shippriority +ORDER BY + revenue DESC, + o_orderdate, + l_orderkey +LIMIT 10; +``` + +You should see the following output: + +```console + l_orderkey | revenue | o_orderdate | o_shippriority +------------+--------------------+-------------+---------------- + 1071617 | 128186.94002748765 | 1995-03-10 | 0 + 1959075 | 33104.49713665398 | 1994-12-23 | 0 + 430243 | 19476.107574179696 | 1994-12-24 | 0 +(3 rows) +``` + +## Stop ScalarDB Analytics with PostgreSQL and the database + +To stop ScalarDB Analytics with PostgreSQL and the database, stop the Docker container by running the following command: + +```console +$ docker-compose down +``` diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/data/customer.csv b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/data/customer.csv new file mode 100644 index 00000000..7ba6cc94 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/data/customer.csv @@ -0,0 +1,10 @@ +1,"Customer#000000001","IVhzIApeRb ot,c,E",15,"25-989-741-2988",711.4294967352,"BUILDING","to the even, regular platelets. regular, ironic epitaphs nag e" +2,"Customer#000000002","XSTf4,NCwDVaWNe6tEgvwfmRchLXak",13,"23-768-687-3665",121.4294967361,"AUTOMOBILE","l accounts. blithely ironic theodolites integrate boldly: caref" +3,"Customer#000000003","MG9kdTD2WBHm",1,"11-719-748-3364",7498.4294967308,"AUTOMOBILE"," deposits eat slyly ironic, even instructions. express foxes detect slyly. blithely even accounts abov" +4,"Customer#000000004","XxVSJsLAGtn",4,"14-128-190-5944",2866.4294967379,"MACHINERY"," requests. final, regular ideas sleep final accou" +5,"Customer#000000005","KvpyuHCplrB84WgAiGV6sYpZq7Tj",3,"13-750-942-6364",794.4294967343,"HOUSEHOLD","n accounts will have to unwind. foxes cajole accor" +6,"Customer#000000006","sKZz0CsnMD7mp4Xd0YrBvx,LREYKUWAh yVn",20,"30-114-968-4951",7638.4294967353,"AUTOMOBILE","tions. even deposits boost according to the slyly bold packages. final accounts cajole requests. furious" +7,"Customer#000000007","TcGe5gaZNgVePxU5kRrvXBfkasDTea",18,"28-190-982-9759",9561.4294967391,"AUTOMOBILE","ainst the ironic, express theodolites. express, even pinto beans among the exp" +8,"Customer#000000008","I0B10bB0AymmC, 0PrRYBCP1yGJ8xcBPmWhl5",17,"27-147-574-9335",6819.4294967370,"BUILDING","among the slyly regular theodolites kindle blithely courts. carefully even theodolites haggle slyly along the ide" +9,"Customer#000000009","xKiAFTjUsCuxfeleNqefumTrjS",8,"18-338-906-3675",8324.4294967303,"FURNITURE","r theodolites according to the requests wake thinly excuses: pending requests haggle furiousl" +10,"Customer#000000010","6LrEaV6KR6PLVcgl2ArL Q3rqzLzcT1 v2",5,"15-741-346-9870",2753.4294967350,"HOUSEHOLD","es regular deposits haggle. 
fur" diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/data/lineitem.csv b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/data/lineitem.csv new file mode 100644 index 00000000..400eb71e --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/data/lineitem.csv @@ -0,0 +1,338 @@ +36422,154112,6628,1,27,31484.3705477335202398305,0.3705477335202398209,0.3705477335202398215,"N","O","1997-05-02","1997-05-01","1997-05-25","TAKE BACK RETURN","TRUCK","into beans. blithely regular reque" +36422,146555,6556,2,37,59257.4294967331,0.4294967300,0.4294967297,"N","O","1997-06-03","1997-04-07","1997-06-17","COLLECT COD","SHIP","g deposits are about the as" +36422,145767,3310,3,42,76135.4294967388,0.4294967303,0.4294967303,"N","O","1997-05-18","1997-05-09","1997-05-28","NONE","MAIL","ly. regular packages sleep. " +36422,90688,689,4,14,23501.4294967348,0.4294967296,0.4294967298,"N","O","1997-06-27","1997-05-12","1997-07-23","DELIVER IN PERSON","SHIP","eposits. even,regular packages snoo" +36422,183949,8986,5,31,63021.4294967310,0.4294967305,0.4294967300,"N","O","1997-03-29","1997-05-25","1997-04-19","DELIVER IN PERSON","REG AIR"," the quickly stea" +36422,17008,2011,6,9,8325.4294967296,0.4294967302,0.4294967301,"N","O","1997-06-10","1997-05-14","1997-06-30","TAKE BACK RETURN","REG AIR","ajole. fluff" +36422,48167,5680,7,27,30109.4294967328,0.4294967302,0.4294967300,"N","O","1997-06-18","1997-05-14","1997-07-15","NONE","AIR","kages. blithely final excuses" +135943,98961,3980,1,37,72518.3705477335202398260,0.3705477335202398211,0.3705477335202398213,"A","F","1993-07-07","1993-09-17","1993-07-22","DELIVER IN PERSON","AIR"," the final requests. " +135943,36541,4051,2,37,54668.4294967394,0.4294967305,0.4294967304,"R","F","1993-08-04","1993-08-28","1993-09-01","NONE","FOB","the regular pac" +135943,143743,6258,3,43,76829.4294967378,0.4294967306,0.4294967300,"A","F","1993-09-12","1993-07-22","1993-09-29","COLLECT COD","TRUCK","ve carefully acros" +135943,74859,4860,4,28,51347.4294967376,0.4294967298,0.4294967304,"A","F","1993-09-27","1993-09-07","1993-10-27","DELIVER IN PERSON","SHIP","y final deposi" +135943,153657,1203,5,34,58162.4294967306,0.4294967297,0.4294967301,"A","F","1993-10-12","1993-09-06","1993-10-31","TAKE BACK RETURN","AIR","ironic excuses are slyl" +164711,82527,52,1,34,51323.3705477335202398276,0.3705477335202398217,0.3705477335202398213,"A","F","1992-04-27","1992-07-23","1992-05-22","NONE","AIR","kages. regular i" +164711,26184,1189,2,15,16652.4294967366,0.4294967297,0.4294967296,"A","F","1992-05-18","1992-06-12","1992-06-11","TAKE BACK RETURN","TRUCK","raids doubt aga" +164711,67393,9900,3,38,51694.4294967378,0.4294967296,0.4294967296,"R","F","1992-07-17","1992-06-11","1992-08-12","DELIVER IN PERSON","MAIL","eodolites cajol" +164711,65941,8448,4,38,72463.4294967368,0.4294967305,0.4294967300,"A","F","1992-07-16","1992-07-05","1992-08-06","TAKE BACK RETURN","FOB","y bold courts; blithely" +164711,69788,9789,5,34,59764.4294967348,0.4294967300,0.4294967302,"R","F","1992-05-09","1992-05-26","1992-05-24","DELIVER IN PERSON","RAIL","ccounts detect among the carefu" +164711,122878,5391,6,33,62728.4294967367,0.4294967299,0.4294967303,"A","F","1992-06-12","1992-06-25","1992-07-03","NONE","TRUCK","cuses use afte" +224167,120715,5740,1,7,12149.3705477335202398305,0.3705477335202398211,0.3705477335202398208,"N","O","1996-06-10","1996-07-27","1996-06-22","NONE","REG AIR","unts. 
furiously sile" +224167,55920,931,2,11,20635.4294967308,0.4294967298,0.4294967297,"N","O","1996-07-05","1996-07-11","1996-07-17","NONE","AIR"," use blithely" +224167,45636,8141,3,18,28469.4294967330,0.4294967303,0.4294967301,"N","O","1996-05-26","1996-07-22","1996-05-28","TAKE BACK RETURN","RAIL","s. slyly unusual pinto beans boos" +224167,199327,1847,4,18,25673.4294967372,0.4294967298,0.4294967296,"N","O","1996-08-25","1996-07-12","1996-09-19","TAKE BACK RETURN","AIR","ng to the fluffily" +287619,42361,7370,1,11,14336.3705477335202398304,0.3705477335202398213,0.3705477335202398208,"N","O","1997-04-14","1997-02-11","1997-05-04","NONE","MAIL","ss the pending platelets wake a" +385825,161438,6471,1,47,70473.3705477335202398229,0.3705477335202398209,0.3705477335202398215,"N","O","1996-01-17","1995-12-27","1996-02-06","DELIVER IN PERSON","RAIL","rses. unusual,special foxes use. f" +385825,74506,7014,2,2,2961.4294967296,0.4294967296,0.4294967302,"N","O","1995-12-17","1996-01-15","1996-01-11","NONE","FOB","eas according to the regular accounts " +385825,10379,5382,3,40,51574.4294967376,0.4294967305,0.4294967299,"N","O","1996-02-03","1996-01-06","1996-02-24","COLLECT COD","SHIP"," carefully pending accounts. furiousl" +385825,32887,5391,4,28,50956.4294967360,0.4294967299,0.4294967303,"N","O","1995-11-05","1995-12-17","1995-11-29","NONE","FOB","s hang slyly unusual accounts. re" +385825,118088,3111,5,22,24333.4294967372,0.4294967306,0.4294967296,"N","O","1995-12-11","1996-01-06","1995-12-28","NONE","MAIL"," warhorses hinder slyly a" +385825,88806,3823,6,16,28716.4294967376,0.4294967301,0.4294967304,"N","O","1995-12-25","1995-12-27","1996-01-05","TAKE BACK RETURN","REG AIR","iously final theodolites boost " +385825,148780,8781,7,28,51205.4294967380,0.4294967304,0.4294967296,"N","O","1996-02-15","1996-01-10","1996-02-22","TAKE BACK RETURN","MAIL","arefully even " +430243,155290,5291,1,23,30941.3705477335202398275,0.3705477335202398209,0.3705477335202398216,"A","F","1995-03-28","1995-03-10","1995-04-06","COLLECT COD","SHIP","ter the fluffily even instructions. furi" +454791,98296,5824,1,40,51771.3705477335202398268,0.3705477335202398213,0.3705477335202398213,"R","F","1992-06-24","1992-06-20","1992-07-11","TAKE BACK RETURN","MAIL"," busy theodol" +454791,52971,487,2,13,25011.4294967357,0.4294967306,0.4294967298,"A","F","1992-05-02","1992-05-19","1992-05-10","NONE","REG AIR","ily silent id" +579908,104617,7128,1,33,53513.3705477335202398221,0.3705477335202398208,0.3705477335202398209,"N","O","1997-03-30","1997-01-11","1997-04-25","NONE","MAIL","uickly special ideas alongside" +816323,149483,4512,1,12,18389.3705477335202398284,0.3705477335202398212,0.3705477335202398216,"N","O","1996-05-21","1996-03-10","1996-06-13","DELIVER IN PERSON","REG AIR","ronic platelets. ironic,ironic" +816323,153105,3106,2,20,23162.4294967296,0.4294967299,0.4294967302,"N","O","1996-03-06","1996-04-17","1996-04-02","NONE","RAIL"," the unusual theodolites. 
carefully bold d" +816323,33413,923,3,40,53856.4294967336,0.4294967301,0.4294967304,"N","O","1996-03-13","1996-03-29","1996-04-07","COLLECT COD","TRUCK","lly pending packages wake f" +816323,161159,3676,4,25,30503.4294967371,0.4294967302,0.4294967302,"N","O","1996-04-14","1996-03-12","1996-05-01","NONE","TRUCK","thely ironic foxes cajole" +816323,182701,2702,5,36,64213.4294967316,0.4294967300,0.4294967299,"N","O","1996-03-13","1996-02-29","1996-03-31","TAKE BACK RETURN","RAIL","arefully express accounts " +816323,42686,7695,6,21,34202.4294967324,0.4294967299,0.4294967303,"N","O","1996-03-14","1996-02-24","1996-03-24","TAKE BACK RETURN","SHIP","r the carefully final somas; " +816323,16221,3725,7,19,21607.4294967314,0.4294967300,0.4294967300,"N","O","1996-02-05","1996-03-04","1996-02-15","COLLECT COD","REG AIR","round the carefully" +859108,142807,7836,1,40,73992.3705477335202398208,0.3705477335202398213,0.3705477335202398208,"N","O","1996-03-31","1996-03-25","1996-04-21","COLLECT COD","REG AIR","ully bold theodolites according to " +859108,146837,4380,2,33,62166.4294967335,0.4294967303,0.4294967303,"N","O","1996-04-03","1996-03-25","1996-04-07","NONE","FOB","regular frays. furiou" +859108,89079,9080,3,3,3204.4294967317,0.4294967303,0.4294967302,"N","O","1996-05-15","1996-05-12","1996-05-20","DELIVER IN PERSON","MAIL","final platelets are. furiously expre" +883557,187324,2361,1,40,56452.3705477335202398288,0.3705477335202398218,0.3705477335202398214,"N","O","1998-06-18","1998-05-27","1998-07-16","COLLECT COD","MAIL","lyly along the fina" +895172,12359,2360,1,27,34326.3705477335202398253,0.3705477335202398217,0.3705477335202398211,"N","O","1996-03-27","1996-01-17","1996-04-08","COLLECT COD","RAIL","jole blithely pending dependencies." +895172,117617,5151,2,39,63749.4294967375,0.4294967297,0.4294967302,"N","O","1996-03-11","1996-01-24","1996-04-03","DELIVER IN PERSON","RAIL",". doggedly bol" +895172,172208,7243,3,29,37125.4294967376,0.4294967299,0.4294967298,"N","O","1996-02-09","1996-02-03","1996-02-13","DELIVER IN PERSON","FOB"," theodolites. theodolites wake daringly" +895172,87784,7785,4,30,53153.4294967336,0.4294967296,0.4294967296,"N","O","1995-12-29","1996-01-07","1996-01-19","DELIVER IN PERSON","TRUCK","ructions haggle b" +895172,185046,2601,5,19,21489.4294967372,0.4294967301,0.4294967298,"N","O","1996-01-27","1996-02-26","1996-02-26","COLLECT COD","SHIP","e furiously special packa" +895172,63141,3142,6,3,3312.4294967338,0.4294967306,0.4294967303,"N","O","1996-01-20","1996-02-14","1996-01-22","DELIVER IN PERSON","TRUCK","oss the even,final packages. 
d" +895172,197028,9548,7,22,24750.4294967340,0.4294967306,0.4294967303,"N","O","1996-02-29","1996-01-20","1996-03-27","DELIVER IN PERSON","SHIP","ans across the carefully final dep" +905633,20814,8321,1,15,26022.3705477335202398223,0.3705477335202398216,0.3705477335202398214,"N","O","1995-09-07","1995-09-01","1995-10-04","NONE","RAIL","usual asymptotes boost among t" +905633,10167,2669,2,46,49549.4294967332,0.4294967306,0.4294967300,"N","O","1995-10-11","1995-10-01","1995-10-17","DELIVER IN PERSON","SHIP","fully ironic " +905633,64302,6809,3,39,49385.4294967366,0.4294967303,0.4294967300,"N","O","1995-07-24","1995-09-25","1995-08-07","NONE","REG AIR","ly pending acc" +905633,111751,1752,4,43,75798.4294967321,0.4294967301,0.4294967298,"N","O","1995-08-08","1995-08-23","1995-08-16","DELIVER IN PERSON","SHIP","e furiously reg" +905633,183408,8445,5,3,4474.4294967316,0.4294967296,0.4294967300,"N","O","1995-08-25","1995-08-14","1995-09-06","TAKE BACK RETURN","FOB","blithely regular packages nag car" +905633,123317,854,6,38,50931.4294967374,0.4294967304,0.4294967297,"N","O","1995-08-17","1995-08-08","1995-08-22","DELIVER IN PERSON","REG AIR","refully dogged foxes. slyl" +905633,172297,2298,7,12,16431.4294967344,0.4294967301,0.4294967301,"N","O","1995-08-03","1995-08-24","1995-08-08","NONE","MAIL","usly special ideas affix after t" +916775,56922,4438,1,35,65762.3705477335202398228,0.3705477335202398214,0.3705477335202398210,"N","O","1996-08-01","1996-07-22","1996-08-29","COLLECT COD","RAIL","nal requests." +916775,76682,9190,2,40,66347.4294967316,0.4294967300,0.4294967301,"N","O","1996-08-24","1996-06-02","1996-09-11","DELIVER IN PERSON","RAIL","furiously " +916775,178845,8846,3,26,50019.4294967380,0.4294967296,0.4294967301,"N","O","1996-05-25","1996-06-17","1996-06-08","TAKE BACK RETURN","RAIL","lar theodolites eat blithely even de" +916775,44007,1520,4,29,27579.4294967296,0.4294967300,0.4294967300,"N","O","1996-08-10","1996-06-19","1996-09-02","DELIVER IN PERSON","SHIP","s sleep along the bold deposits." +916775,11158,6161,5,3,3207.4294967341,0.4294967306,0.4294967304,"N","O","1996-05-10","1996-06-30","1996-05-12","NONE","RAIL","its. quickly ironic instructions can" +916775,120334,7871,6,36,48755.4294967384,0.4294967296,0.4294967298,"N","O","1996-07-16","1996-05-29","1996-08-15","COLLECT COD","MAIL","hockey players " +916775,43250,5755,7,15,17898.4294967371,0.4294967305,0.4294967301,"N","O","1996-07-16","1996-07-01","1996-08-13","COLLECT COD","RAIL","ic pearls. furiously ironic ideas h" +1071617,36850,1857,1,27,48244.3705477335202398303,0.3705477335202398208,0.3705477335202398211,"N","O","1995-07-09","1995-05-15","1995-07-16","DELIVER IN PERSON","MAIL","ic packages are. spe" +1071617,137384,2411,2,11,15635.4294967314,0.4294967306,0.4294967297,"R","F","1995-04-08","1995-04-16","1995-04-13","COLLECT COD","MAIL","ke furiously furiously regular " +1071617,3374,8375,3,36,45985.4294967328,0.4294967298,0.4294967303,"N","O","1995-07-03","1995-04-26","1995-07-17","DELIVER IN PERSON","FOB","express requests. even patterns " +1071617,90308,309,4,30,38949.4294967296,0.4294967299,0.4294967302,"N","F","1995-06-12","1995-05-28","1995-06-26","DELIVER IN PERSON","REG AIR","l ideas. 
blithely ironic instructions are " +1071617,194723,2281,5,39,70891.4294967304,0.4294967303,0.4294967301,"A","F","1995-06-08","1995-06-03","1995-06-15","TAKE BACK RETURN","AIR","blithely express hockey" +1073670,79911,9912,1,41,77527.3705477335202398239,0.3705477335202398217,0.3705477335202398216,"R","F","1994-09-06","1994-07-16","1994-09-17","COLLECT COD","AIR","theodolites. regular deposits brea" +1192231,103367,3368,1,29,39740.3705477335202398252,0.3705477335202398209,0.3705477335202398215,"N","O","1996-06-06","1996-08-24","1996-06-15","NONE","MAIL","across the blithe" +1192231,86763,1780,2,9,15747.4294967380,0.4294967297,0.4294967301,"N","O","1996-09-25","1996-07-22","1996-10-09","TAKE BACK RETURN","TRUCK","careful accounts. even ideas haggle aga" +1192231,10325,326,3,15,18529.4294967376,0.4294967300,0.4294967299,"N","O","1996-09-25","1996-07-31","1996-09-30","COLLECT COD","REG AIR"," the furiously furious deposits wake fur" +1192231,117347,4881,4,2,2728.4294967364,0.4294967304,0.4294967300,"N","O","1996-07-10","1996-07-24","1996-07-12","NONE","MAIL","osits. fluffily regular courts sle" +1192231,43478,8487,5,26,36958.4294967318,0.4294967297,0.4294967299,"N","O","1996-06-19","1996-08-18","1996-06-23","NONE","TRUCK","fully regular instructions" +1192231,120368,369,6,19,26378.4294967380,0.4294967298,0.4294967300,"N","O","1996-07-14","1996-08-12","1996-07-16","DELIVER IN PERSON","AIR","bold packages haggle furiously against th" +1201223,59527,7043,1,44,65406.3705477335202398296,0.3705477335202398212,0.3705477335202398209,"N","O","1996-02-20","1996-04-02","1996-03-19","NONE","FOB","g,unusual platel" +1201223,66105,8612,2,2,2142.4294967316,0.4294967300,0.4294967297,"N","O","1996-01-25","1996-04-04","1996-02-10","NONE","AIR","kly special deposits wake regularly a" +1201223,19891,2393,3,41,74246.4294967345,0.4294967298,0.4294967297,"N","O","1996-02-07","1996-03-31","1996-02-25","DELIVER IN PERSON","MAIL","ayers. ideas boost at the packages. depos" +1226497,101559,4070,1,32,49937.3705477335202398268,0.3705477335202398216,0.3705477335202398214,"R","F","1993-11-02","1993-12-04","1993-11-22","NONE","MAIL",". final,unusual deposits cajole slowly. i" +1226497,98547,6075,2,27,41729.4294967354,0.4294967302,0.4294967297,"R","F","1994-01-09","1993-12-10","1994-01-15","TAKE BACK RETURN","RAIL","e quickly along th" +1374019,45052,7557,1,39,38884.3705477335202398303,0.3705477335202398215,0.3705477335202398208,"A","F","1992-05-01","1992-05-20","1992-05-25","TAKE BACK RETURN","TRUCK","special,special deposits among the f" +1374019,178552,1070,2,1,1630.4294967351,0.4294967306,0.4294967296,"A","F","1992-06-21","1992-05-26","1992-07-18","DELIVER IN PERSON","AIR","en theodolites. furiously unus" +1374019,111572,6595,3,13,20586.4294967337,0.4294967303,0.4294967297,"R","F","1992-05-10","1992-06-25","1992-05-30","TAKE BACK RETURN","SHIP","furiously ac" +1374019,182631,7668,4,42,71972.4294967342,0.4294967305,0.4294967297,"A","F","1992-07-06","1992-06-28","1992-08-05","DELIVER IN PERSON","TRUCK","deposits sleep across the ironic deposits" +1374019,62432,9951,5,33,46016.4294967315,0.4294967306,0.4294967302,"A","F","1992-05-20","1992-06-03","1992-05-25","DELIVER IN PERSON","TRUCK","e slyly even instructions. 
furiously i" +1485505,90924,5943,1,46,88086.3705477335202398240,0.3705477335202398217,0.3705477335202398208,"N","O","1998-10-28","1998-09-12","1998-11-07","COLLECT COD","REG AIR","s instruct" +1485505,150483,484,2,34,52138.4294967328,0.4294967302,0.4294967299,"N","O","1998-10-07","1998-09-24","1998-11-02","TAKE BACK RETURN","AIR","ose ideas " +1485505,151566,1567,3,6,9705.4294967332,0.4294967306,0.4294967299,"N","O","1998-10-10","1998-09-14","1998-11-05","NONE","AIR","ests according " +1485505,168504,6053,4,32,50320.4294967296,0.4294967302,0.4294967300,"N","O","1998-11-18","1998-10-19","1998-11-22","TAKE BACK RETURN","AIR","re blithely" +1485505,33239,3240,5,10,11722.4294967326,0.4294967300,0.4294967297,"N","O","1998-09-15","1998-10-05","1998-10-13","TAKE BACK RETURN","RAIL","ular deposits wake blithely across th" +1485505,31850,6857,6,50,89092.4294967346,0.4294967301,0.4294967298,"N","O","1998-09-18","1998-10-10","1998-10-03","TAKE BACK RETURN","RAIL","dependencies above the f" +1490087,104957,4958,1,22,43162.3705477335202398298,0.3705477335202398210,0.3705477335202398214,"N","O","1996-09-16","1996-09-30","1996-10-13","COLLECT COD","RAIL","nag; carefully i" +1490087,6676,9177,2,45,71220.4294967311,0.4294967298,0.4294967298,"N","O","1996-08-26","1996-08-10","1996-09-14","DELIVER IN PERSON","AIR","c,ironic requests haggle blithely" +1490087,194371,9410,3,14,20515.4294967314,0.4294967304,0.4294967301,"N","O","1996-09-09","1996-08-13","1996-10-09","TAKE BACK RETURN","SHIP","cajole fluffily about the instruc" +1490087,180589,8144,4,30,50087.4294967336,0.4294967300,0.4294967303,"N","O","1996-07-24","1996-09-16","1996-08-23","TAKE BACK RETURN","SHIP","old dolphins. quickly ironic accoun" +1490087,155076,107,5,36,40718.4294967348,0.4294967301,0.4294967300,"N","O","1996-10-31","1996-08-09","1996-11-26","TAKE BACK RETURN","REG AIR","al theodolites haggle." +1590469,21835,1836,1,12,21081.3705477335202398304,0.3705477335202398213,0.3705477335202398210,"N","O","1997-07-06","1997-05-14","1997-07-07","NONE","AIR",",pending accounts are furiously even foxe" +1590469,180776,3295,2,11,20424.4294967343,0.4294967299,0.4294967298,"N","O","1997-04-02","1997-05-23","1997-04-12","COLLECT COD","REG AIR","etect carefully. blithely ironic dolphins " +1590469,100142,7673,3,30,34264.4294967316,0.4294967296,0.4294967299,"N","O","1997-05-08","1997-04-23","1997-06-06","NONE","SHIP","he thinly even in" +1755398,175059,7577,1,1,1134.3705477335202398213,0.3705477335202398213,0.3705477335202398213,"N","O","1997-08-04","1997-09-09","1997-08-05","TAKE BACK RETURN","RAIL","carefully fi" +1763205,12629,5131,1,10,15416.3705477335202398228,0.3705477335202398208,0.3705477335202398215,"A","F","1994-10-30","1994-10-24","1994-11-23","NONE","REG AIR",". fluffily regular packages " +1774689,75407,422,1,10,13824.3705477335202398208,0.3705477335202398209,0.3705477335202398209,"R","F","1993-07-12","1993-09-26","1993-07-30","NONE","MAIL","s. slyly express frays are furio" +1842406,84138,6647,1,13,14587.3705477335202398277,0.3705477335202398209,0.3705477335202398212,"N","O","1996-11-30","1996-10-15","1996-12-01","DELIVER IN PERSON","SHIP","idle,bold requests wake fluff" +1842406,105207,2738,2,6,7273.4294967316,0.4294967296,0.4294967298,"N","O","1996-09-05","1996-09-24","1996-09-11","NONE","REG AIR","le blithely: express,bold accounts sl" +1842406,10233,7737,3,7,8002.4294967357,0.4294967303,0.4294967297,"N","O","1996-10-23","1996-11-01","1996-11-14","TAKE BACK RETURN","REG AIR","lyly. 
regular ideas a" +1842406,177150,2185,4,48,58903.4294967316,0.4294967297,0.4294967303,"N","O","1996-08-17","1996-09-07","1996-08-28","DELIVER IN PERSON","TRUCK","ms haggle according to the un" +1842406,159230,1746,5,40,51569.4294967316,0.4294967301,0.4294967302,"N","O","1996-09-13","1996-10-20","1996-10-02","TAKE BACK RETURN","RAIL","s haggle. express,regular packages amon" +1842406,166502,1535,6,19,29801.4294967346,0.4294967301,0.4294967302,"N","O","1996-10-17","1996-10-08","1996-11-09","DELIVER IN PERSON","REG AIR","onic theodolites integrate among the furio" +1859778,64986,4987,1,40,78039.3705477335202398228,0.3705477335202398210,0.3705477335202398209,"N","O","1998-03-29","1998-03-23","1998-04-01","DELIVER IN PERSON","MAIL"," slyly regular theodolites along" +1859778,15737,3241,2,5,8263.4294967361,0.4294967298,0.4294967301,"N","O","1998-04-11","1998-03-07","1998-04-22","NONE","AIR","grate quickly requests. " +1859778,83429,8446,3,46,64971.4294967328,0.4294967303,0.4294967303,"N","O","1998-02-20","1998-03-06","1998-02-27","COLLECT COD","FOB"," accounts. careful" +1859778,190895,5934,4,27,53619.4294967299,0.4294967296,0.4294967299,"N","O","1998-05-19","1998-05-04","1998-05-21","NONE","MAIL","arefully express theodolites sleep caref" +1859778,168132,5681,5,48,57606.4294967320,0.4294967302,0.4294967303,"N","O","1998-02-17","1998-04-18","1998-03-19","TAKE BACK RETURN","MAIL","sual instructions. bo" +1894087,53573,6079,1,23,35111.3705477335202398219,0.3705477335202398208,0.3705477335202398212,"R","F","1994-03-13","1994-03-28","1994-04-05","DELIVER IN PERSON","SHIP","e final packages are even pinto beans. t" +1925447,63293,812,1,35,43970.3705477335202398223,0.3705477335202398213,0.3705477335202398210,"N","O","1997-06-04","1997-04-13","1997-06-19","DELIVER IN PERSON","FOB","iously even instructions. finally bold a" +1925447,152744,290,2,3,5390.4294967318,0.4294967306,0.4294967296,"N","O","1997-06-14","1997-05-18","1997-07-06","TAKE BACK RETURN","AIR","ss the furiou" +1925447,161053,6086,3,20,22281.4294967296,0.4294967303,0.4294967300,"N","O","1997-04-12","1997-05-20","1997-04-18","TAKE BACK RETURN","TRUCK"," special deposits. " +1925447,17020,7021,4,8,7496.4294967312,0.4294967305,0.4294967298,"N","O","1997-05-19","1997-03-27","1997-06-05","TAKE BACK RETURN","RAIL","ly carefully even th" +1925447,49532,2037,5,26,38519.4294967374,0.4294967298,0.4294967304,"N","O","1997-06-12","1997-05-01","1997-06-28","DELIVER IN PERSON","AIR","ole regular instructions. blithely special" +1925447,68539,1046,6,12,18090.4294967332,0.4294967298,0.4294967296,"N","O","1997-03-13","1997-04-15","1997-03-26","COLLECT COD","MAIL","s haggle bli" +1944711,66583,4102,1,37,57334.3705477335202398254,0.3705477335202398211,0.3705477335202398208,"R","F","1995-05-15","1995-07-29","1995-05-20","TAKE BACK RETURN","MAIL","ular forges. slyly ev" +1944711,48820,3829,2,15,26532.4294967326,0.4294967299,0.4294967298,"N","O","1995-08-06","1995-06-20","1995-09-04","TAKE BACK RETURN","MAIL","into beans according to the slyl" +1944711,103892,8913,3,13,24646.4294967353,0.4294967306,0.4294967300,"N","O","1995-07-16","1995-06-28","1995-07-31","DELIVER IN PERSON","MAIL",",final instructions." +1953441,3713,8714,1,48,77602.3705477335202398216,0.3705477335202398218,0.3705477335202398216,"N","O","1996-07-08","1996-08-09","1996-07-11","DELIVER IN PERSON","REG AIR","of the slyly pending theodolites. 
f" +1953441,191708,4228,2,2,3599.4294967336,0.4294967306,0.4294967300,"N","O","1996-07-24","1996-08-07","1996-08-16","DELIVER IN PERSON","RAIL","uriously express dolphins. blithely clos" +1953441,31856,9366,3,2,3575.4294967366,0.4294967298,0.4294967301,"N","O","1996-09-09","1996-07-21","1996-09-14","DELIVER IN PERSON","RAIL","furiously silent packages. blithely ir" +1953441,1186,8687,4,7,7610.4294967322,0.4294967302,0.4294967296,"N","O","1996-08-02","1996-08-07","1996-08-11","TAKE BACK RETURN","TRUCK",",bold decoys belie" +1953441,122322,9859,5,21,28230.4294967368,0.4294967301,0.4294967303,"N","O","1996-07-15","1996-08-07","1996-08-05","NONE","MAIL"," packages snooze quickly. final ide" +1953441,64462,6969,6,31,44220.4294967322,0.4294967298,0.4294967303,"N","O","1996-07-14","1996-08-21","1996-07-16","NONE","FOB","uffily final deposits. furious" +1953441,11771,4273,7,44,74041.4294967384,0.4294967303,0.4294967299,"N","O","1996-07-23","1996-07-26","1996-07-26","COLLECT COD","AIR","l braids. quickly express deposits cajole a" +1959075,56184,6185,1,27,30784.3705477335202398294,0.3705477335202398209,0.3705477335202398212,"R","F","1995-02-26","1995-03-08","1995-03-24","DELIVER IN PERSON","REG AIR","mong the ironic,final tithes. furiou" +1959075,70761,3269,2,5,8658.4294967376,0.4294967297,0.4294967299,"A","F","1995-03-23","1995-03-10","1995-04-22","TAKE BACK RETURN","RAIL","al foxes affix. furious deposits" +1959075,69504,9505,3,49,72201.4294967346,0.4294967297,0.4294967300,"A","F","1995-02-22","1995-02-01","1995-03-14","COLLECT COD","TRUCK"," asymptotes wake daringly" +1959075,152282,9828,4,37,49368.4294967332,0.4294967304,0.4294967302,"A","F","1995-03-20","1995-02-08","1995-03-24","DELIVER IN PERSON","RAIL","ly. gifts sleep " +1959075,198170,8171,5,24,30436.4294967304,0.4294967306,0.4294967303,"A","F","1995-03-03","1995-03-15","1995-03-26","NONE","AIR"," requests are slyly. requests run qui" +1978756,168332,8333,1,45,63014.3705477335202398293,0.3705477335202398212,0.3705477335202398208,"N","O","1996-09-30","1996-09-22","1996-10-06","NONE","SHIP","ing to the carefully even de" +1978756,143978,1521,2,49,99076.4294967349,0.4294967306,0.4294967297,"N","O","1996-10-20","1996-09-12","1996-10-23","TAKE BACK RETURN","FOB"," deposits wake fluffi" +1978756,26601,1606,3,35,53466.4294967296,0.4294967306,0.4294967301,"N","O","1996-10-03","1996-10-29","1996-10-21","DELIVER IN PERSON","REG AIR"," packages haggle s" +1978756,81795,1796,4,25,44419.4294967371,0.4294967303,0.4294967299,"N","O","1996-12-01","1996-10-21","1996-12-21","COLLECT COD","REG AIR","pinto beans cajole carefully enti" +1978756,124255,6768,5,22,28143.4294967346,0.4294967302,0.4294967298,"N","O","1996-12-01","1996-09-18","1996-12-20","NONE","FOB","n theodolites " +1978756,153258,5774,6,35,45893.4294967371,0.4294967300,0.4294967296,"N","O","1996-09-19","1996-09-14","1996-10-06","COLLECT COD","MAIL","packages. final,even excuses maint" +2014848,15065,7567,1,44,43122.3705477335202398272,0.3705477335202398208,0.3705477335202398214,"N","O","1997-03-28","1997-04-01","1997-04-06","COLLECT COD","SHIP","iously thin pinto beans sleep bli" +2014848,193155,3156,2,30,37444.4294967346,0.4294967305,0.4294967300,"N","O","1997-03-21","1997-02-24","1997-03-23","DELIVER IN PERSON","AIR","sly final instructions. regular " +2014848,140639,8182,3,48,80622.4294967320,0.4294967297,0.4294967301,"N","O","1997-02-01","1997-02-27","1997-02-28","NONE","SHIP","ages. 
furio" +2014848,112844,378,4,9,16711.4294967352,0.4294967305,0.4294967304,"N","O","1997-04-07","1997-03-26","1997-04-12","COLLECT COD","TRUCK","sly bold p" +2096544,139461,9462,1,20,30009.3705477335202398228,0.3705477335202398210,0.3705477335202398216,"R","F","1992-05-26","1992-05-22","1992-06-05","NONE","RAIL","ng to the furiously even" +2096544,109740,2251,2,13,22746.4294967358,0.4294967301,0.4294967304,"A","F","1992-05-14","1992-06-22","1992-06-03","COLLECT COD","TRUCK","ers haggle unusual epitaphs. furiou" +2096544,184592,9629,3,4,6706.4294967332,0.4294967302,0.4294967302,"R","F","1992-04-19","1992-05-31","1992-04-21","TAKE BACK RETURN","RAIL","ong the regular,express pac" +2096544,25429,434,4,9,12189.4294967374,0.4294967300,0.4294967297,"R","F","1992-07-25","1992-07-15","1992-08-17","COLLECT COD","RAIL","xes. furiously thin accounts engage furiou" +2096544,179883,9884,5,23,45146.4294967320,0.4294967298,0.4294967301,"R","F","1992-05-08","1992-06-24","1992-05-26","COLLECT COD","REG AIR","es. regular deposits" +2096544,79598,2106,6,9,14198.4294967327,0.4294967298,0.4294967302,"A","F","1992-07-19","1992-05-24","1992-07-25","DELIVER IN PERSON","SHIP","s the ironic,final accounts. carefully b" +2096544,813,814,7,31,53128.4294967307,0.4294967303,0.4294967298,"R","F","1992-06-23","1992-06-29","1992-07-11","NONE","REG AIR","ickly around the idle,even foxes. carefull" +2126688,26118,1123,1,26,27146.3705477335202398294,0.3705477335202398211,0.3705477335202398208,"R","F","1993-10-10","1993-11-02","1993-11-02","TAKE BACK RETURN","MAIL","tions cajol" +2126688,150126,127,2,20,23522.4294967336,0.4294967298,0.4294967303,"R","F","1993-08-24","1993-10-31","1993-08-28","DELIVER IN PERSON","TRUCK"," final,regular pac" +2415204,117488,5022,1,49,73768.3705477335202398260,0.3705477335202398214,0.3705477335202398212,"R","F","1994-11-17","1995-01-22","1994-12-02","NONE","AIR","counts. silent excuses" +2415204,17342,2345,2,27,34002.4294967314,0.4294967305,0.4294967297,"R","F","1995-01-10","1995-01-01","1995-01-27","TAKE BACK RETURN","RAIL","ously unusual theodolite" +2415204,62424,4931,3,5,6932.4294967306,0.4294967299,0.4294967302,"A","F","1994-11-20","1994-12-26","1994-12-07","NONE","MAIL","ests. blithely ironic pac" +2415204,149265,4294,4,7,9199.4294967378,0.4294967301,0.4294967298,"R","F","1995-01-02","1995-01-03","1995-01-21","NONE","FOB",". finally ironic deposits detect carefull" +2415204,87718,227,5,40,68228.4294967336,0.4294967300,0.4294967296,"R","F","1995-01-06","1994-12-27","1995-01-08","COLLECT COD","REG AIR","riously. sl" +2459619,85054,71,1,42,43640.3705477335202398218,0.3705477335202398209,0.3705477335202398215,"N","O","1996-01-22","1996-02-02","1996-01-27","COLLECT COD","FOB"," deposits cajole. fluffily spec" +2630562,108745,1256,1,37,64888.3705477335202398246,0.3705477335202398215,0.3705477335202398208,"R","F","1993-09-19","1993-07-30","1993-10-18","TAKE BACK RETURN","REG AIR","telets are around th" +2630562,63400,8413,2,47,64079.4294967376,0.4294967299,0.4294967298,"R","F","1993-09-29","1993-07-29","1993-10-24","TAKE BACK RETURN","FOB","refully. ironic,pen" +2630562,162921,7954,3,8,15871.4294967332,0.4294967301,0.4294967302,"A","F","1993-09-25","1993-09-17","1993-10-14","DELIVER IN PERSON","MAIL","lets after the bold,pending" +2630562,199149,4188,4,45,56166.4294967326,0.4294967298,0.4294967302,"R","F","1993-07-14","1993-09-24","1993-08-12","COLLECT COD","AIR"," regular instructions. 
blithe" +2630562,83201,3202,5,26,30789.4294967316,0.4294967304,0.4294967304,"A","F","1993-09-25","1993-08-29","1993-10-12","DELIVER IN PERSON","REG AIR","ously unusual accou" +2630562,155844,3390,6,23,43696.4294967328,0.4294967303,0.4294967296,"R","F","1993-08-07","1993-08-31","1993-08-19","COLLECT COD","REG AIR"," unusual realms. fur" +2630562,68414,5933,7,39,53913.4294967395,0.4294967296,0.4294967299,"A","F","1993-07-15","1993-09-10","1993-07-27","NONE","SHIP","its. blithely regular pinto beans kindle c" +2765152,19827,2329,1,5,8734.3705477335202398218,0.3705477335202398218,0.3705477335202398214,"N","O","1996-10-27","1996-10-18","1996-11-07","COLLECT COD","AIR","s boost fluffily after the instructions; p" +2917345,12883,2884,1,19,34121.3705477335202398280,0.3705477335202398214,0.3705477335202398209,"N","O","1995-12-04","1995-12-26","1995-12-26","DELIVER IN PERSON","MAIL","l frets. caref" +2917345,114072,4073,2,10,10860.4294967366,0.4294967301,0.4294967297,"N","O","1995-10-27","1995-12-22","1995-11-10","TAKE BACK RETURN","AIR","ss deposits. express," +2917345,729,5730,3,6,9778.4294967328,0.4294967303,0.4294967303,"N","O","1995-10-21","1995-12-14","1995-11-05","NONE","SHIP","sual instructions. fluf" +2943521,157394,4940,1,21,30479.3705477335202398227,0.3705477335202398208,0.3705477335202398212,"N","O","1998-02-21","1998-04-21","1998-02-22","COLLECT COD","AIR"," the bold,special pinto" +2986913,85787,5788,1,3,5318.3705477335202398242,0.3705477335202398217,0.3705477335202398215,"A","F","1994-08-12","1994-08-30","1994-09-10","DELIVER IN PERSON","TRUCK","lithely bold courts. even " +2986913,8369,8370,2,29,37043.4294967340,0.4294967302,0.4294967297,"R","F","1994-07-19","1994-08-23","1994-08-16","DELIVER IN PERSON","REG AIR"," regular pinto beans haggle across the furi" +2992930,84928,7437,1,50,95646.3705477335202398208,0.3705477335202398208,0.3705477335202398216,"A","F","1994-09-12","1994-08-18","1994-10-03","NONE","AIR"," slyly final instr" +3038880,25391,2898,1,11,14480.3705477335202398237,0.3705477335202398214,0.3705477335202398215,"N","O","1995-09-24","1995-11-18","1995-09-28","NONE","TRUCK","ly pending instructions a" +3038880,142394,2395,2,42,60328.4294967334,0.4294967306,0.4294967300,"N","O","1995-10-30","1995-10-18","1995-11-29","TAKE BACK RETURN","AIR","luffily special foxes nag blith" +3038880,603,604,3,43,64654.4294967376,0.4294967303,0.4294967303,"N","O","1995-10-21","1995-10-16","1995-10-23","DELIVER IN PERSON","FOB"," regular account" +3038880,124899,9924,4,3,5771.4294967363,0.4294967297,0.4294967298,"N","O","1995-11-12","1995-10-24","1995-12-05","COLLECT COD","AIR","ecial requests ca" +3038880,173759,8794,5,31,56815.4294967321,0.4294967305,0.4294967301,"N","O","1995-12-13","1995-10-10","1995-12-16","NONE","REG AIR","special accounts detect beyond the slyly" +3038880,151513,6544,6,41,64144.4294967387,0.4294967298,0.4294967301,"N","O","1995-10-06","1995-10-28","1995-10-08","NONE","SHIP","onic requests; final,bold packages beneath" +3038880,72629,7644,7,41,65666.4294967338,0.4294967301,0.4294967302,"N","O","1995-10-21","1995-10-22","1995-11-04","DELIVER IN PERSON","FOB","ial deposits. 
slow,special deposits h" +3069221,73817,8832,1,13,23280.3705477335202398261,0.3705477335202398211,0.3705477335202398213,"N","O","1997-02-27","1997-01-11","1997-03-13","NONE","RAIL"," final deposits sleep spe" +3069221,34628,7132,2,2,3125.4294967320,0.4294967300,0.4294967301,"N","O","1997-01-31","1996-12-22","1997-02-07","NONE","SHIP","inal pinto beans wake furiously" +3211909,153053,5569,1,5,5530.3705477335202398233,0.3705477335202398208,0.3705477335202398212,"R","F","1993-09-25","1993-08-02","1993-10-06","TAKE BACK RETURN","AIR","egular packages integrate " +3211909,155177,208,2,4,4928.4294967364,0.4294967304,0.4294967300,"A","F","1993-10-14","1993-07-30","1993-11-02","DELIVER IN PERSON","FOB"," furiously speci" +3211909,48262,8263,3,8,9682.4294967304,0.4294967296,0.4294967304,"R","F","1993-09-14","1993-08-31","1993-10-07","COLLECT COD","RAIL","tions are slyly regular requests. " +3211909,131471,6498,4,22,33054.4294967330,0.4294967296,0.4294967300,"R","F","1993-08-14","1993-08-10","1993-08-16","DELIVER IN PERSON","SHIP","e final,pending accounts. ironic account" +3211909,18132,3135,5,35,36754.4294967351,0.4294967299,0.4294967303,"R","F","1993-10-17","1993-08-11","1993-11-08","DELIVER IN PERSON","FOB","unts according to the slyly regular pinto " +3211909,140675,5704,6,29,49754.4294967339,0.4294967305,0.4294967298,"R","F","1993-08-05","1993-09-08","1993-08-07","COLLECT COD","TRUCK","packages nag fluffily. care" +3211909,113087,8110,7,43,47303.4294967340,0.4294967297,0.4294967298,"A","F","1993-08-06","1993-09-10","1993-08-14","COLLECT COD","SHIP",". quickly regular requests boost sl" +3251169,77605,7606,1,43,68051.3705477335202398288,0.3705477335202398212,0.3705477335202398208,"N","O","1996-01-22","1996-03-01","1996-01-30","DELIVER IN PERSON","MAIL","eas. blithely" +3251169,106088,6089,2,25,27352.4294967296,0.4294967301,0.4294967296,"N","O","1996-03-22","1996-04-05","1996-04-13","TAKE BACK RETURN","TRUCK","s cajole id" +3251169,11860,6863,3,23,40752.4294967374,0.4294967298,0.4294967300,"N","O","1996-02-28","1996-03-15","1996-03-08","COLLECT COD","TRUCK"," carefully bold requests. ironic deposits c" +3251169,124206,4207,4,48,59049.4294967356,0.4294967296,0.4294967303,"N","O","1996-03-01","1996-02-25","1996-03-25","TAKE BACK RETURN","FOB","es. quickly even " +3251169,129315,4340,5,23,30919.4294967309,0.4294967305,0.4294967300,"N","O","1996-02-23","1996-03-13","1996-03-05","DELIVER IN PERSON","SHIP","nding theodolites sleep quickly a" +3318789,3143,5644,1,33,34522.3705477335202398270,0.3705477335202398217,0.3705477335202398215,"A","F","1992-07-21","1992-05-31","1992-08-15","TAKE BACK RETURN","AIR","lar,regular accounts nod " +3318789,15884,5885,2,33,59396.4294967300,0.4294967299,0.4294967298,"R","F","1992-08-15","1992-06-05","1992-08-22","COLLECT COD","REG AIR","ly ironic dependenc" +3318789,154701,9732,3,15,26335.4294967346,0.4294967305,0.4294967301,"A","F","1992-05-31","1992-06-21","1992-06-29","NONE","FOB","ackages haggle furio" +3318789,52262,9778,4,39,47356.4294967310,0.4294967297,0.4294967302,"R","F","1992-07-23","1992-07-26","1992-08-07","DELIVER IN PERSON","FOB","ses. blithely regular pac" +3318789,167000,9517,5,14,14938.4294967296,0.4294967299,0.4294967297,"A","F","1992-08-18","1992-06-26","1992-09-11","DELIVER IN PERSON","AIR","requests. 
quietly unusual foxes " +3318789,155063,94,6,26,29069.4294967352,0.4294967305,0.4294967300,"R","F","1992-07-07","1992-06-10","1992-07-26","TAKE BACK RETURN","AIR"," bold packages " +3318789,134062,4063,7,36,39458.4294967312,0.4294967297,0.4294967299,"R","F","1992-08-13","1992-07-03","1992-09-07","DELIVER IN PERSON","FOB","y regular ideas are. furiously unus" +3354726,94998,2526,1,12,23915.3705477335202398296,0.3705477335202398210,0.3705477335202398213,"N","O","1998-04-22","1998-03-25","1998-05-21","COLLECT COD","TRUCK","refully blithely ironic " +3354726,23666,8671,2,6,9537.4294967392,0.4294967296,0.4294967297,"N","O","1998-04-13","1998-03-11","1998-05-07","DELIVER IN PERSON","SHIP","ptotes detect blithel" +3354726,142776,319,3,39,70932.4294967299,0.4294967296,0.4294967304,"N","O","1998-03-13","1998-02-13","1998-04-01","DELIVER IN PERSON","FOB","e along the fluffily fi" +3354726,31938,1939,4,8,14959.4294967340,0.4294967296,0.4294967303,"N","O","1998-05-04","1998-03-06","1998-05-08","TAKE BACK RETURN","FOB","sauternes use. iro" +3354726,165382,5383,5,23,33289.4294967370,0.4294967302,0.4294967296,"N","O","1998-01-27","1998-03-27","1998-02-08","TAKE BACK RETURN","REG AIR","s. sly deposits ab" +3421092,98162,672,1,36,41765.3705477335202398284,0.3705477335202398212,0.3705477335202398211,"N","O","1998-07-29","1998-06-20","1998-08-10","COLLECT COD","REG AIR","sly bold asymptotes" +3421092,7783,5284,2,38,64249.4294967360,0.4294967297,0.4294967299,"N","O","1998-07-18","1998-07-14","1998-07-31","NONE","TRUCK","dly alongside of the spe" +3421092,3024,5525,3,42,38934.4294967380,0.4294967304,0.4294967296,"N","O","1998-07-21","1998-08-02","1998-07-27","NONE","MAIL","ly according to the busy,special " +3421092,16851,6852,4,5,8839.4294967321,0.4294967303,0.4294967301,"N","O","1998-08-29","1998-06-18","1998-09-03","COLLECT COD","AIR","ar pinto bea" +3421092,4548,9549,5,21,30503.4294967330,0.4294967303,0.4294967303,"N","O","1998-08-27","1998-08-13","1998-09-07","NONE","REG AIR","ubt fluffily bold excuses. " +3431909,164146,9179,1,29,35094.3705477335202398214,0.3705477335202398213,0.3705477335202398209,"N","O","1997-05-25","1997-04-23","1997-06-12","DELIVER IN PERSON","FOB","to beans-- pending requests use" +3431909,195035,5036,2,38,42941.4294967310,0.4294967305,0.4294967304,"N","O","1997-03-05","1997-03-31","1997-03-26","TAKE BACK RETURN","MAIL","r packages wake blithely c" +3431909,136939,4479,3,36,71133.4294967344,0.4294967296,0.4294967303,"N","O","1997-04-05","1997-04-27","1997-05-01","NONE","SHIP","beans breach furiously near the furious" +3487745,195005,5006,1,42,46200.3705477335202398208,0.3705477335202398210,0.3705477335202398212,"N","O","1997-01-23","1996-11-06","1997-02-04","TAKE BACK RETURN","TRUCK","s use. final dolphins should have" +3487745,37824,328,2,28,49330.4294967392,0.4294967298,0.4294967298,"N","O","1996-12-19","1996-12-25","1996-12-27","NONE","TRUCK","eposits. pending asymptotes according to th" +3487745,92415,4925,3,27,38000.4294967303,0.4294967306,0.4294967304,"N","O","1996-11-20","1996-12-15","1996-11-27","COLLECT COD","FOB","xes. 
blithely unusual accoun" +3487745,17849,2852,4,35,61839.4294967336,0.4294967303,0.4294967300,"N","O","1996-12-03","1996-12-02","1996-12-23","DELIVER IN PERSON","AIR","nal,idle " +3580034,72757,2758,1,3,5189.3705477335202398233,0.3705477335202398216,0.3705477335202398211,"A","F","1992-10-22","1992-08-26","1992-11-09","TAKE BACK RETURN","MAIL"," instruction" +3580034,13497,3498,2,6,8462.4294967390,0.4294967298,0.4294967300,"R","F","1992-10-07","1992-09-20","1992-10-21","NONE","MAIL","ins nag according to" +3580034,32403,2404,3,17,22701.4294967376,0.4294967304,0.4294967296,"A","F","1992-08-07","1992-08-22","1992-08-12","TAKE BACK RETURN","AIR","arefully express ideas. carefully spe" +3580034,183536,6055,4,45,72878.4294967381,0.4294967302,0.4294967297,"A","F","1992-08-21","1992-10-03","1992-08-23","TAKE BACK RETURN","AIR","y regular acco" +3580034,84564,9581,5,31,48005.4294967332,0.4294967298,0.4294967299,"R","F","1992-08-10","1992-08-15","1992-09-08","COLLECT COD","RAIL","olites against " +3580034,53570,3571,6,33,50277.4294967377,0.4294967298,0.4294967301,"A","F","1992-08-29","1992-09-17","1992-09-02","DELIVER IN PERSON","RAIL","special warhorses. furiously ironic courts " +3580034,148252,8253,7,20,26005.4294967296,0.4294967296,0.4294967296,"R","F","1992-09-09","1992-08-29","1992-10-09","TAKE BACK RETURN","FOB"," silent realms. special,silent fo" +3683623,87587,5112,1,25,39364.3705477335202398258,0.3705477335202398216,0.3705477335202398214,"N","O","1997-01-13","1997-02-25","1997-01-24","DELIVER IN PERSON","TRUCK","ding packages. furiously ev" +3683623,62217,7230,2,3,3537.4294967359,0.4294967296,0.4294967296,"N","O","1997-02-25","1997-01-13","1997-03-06","COLLECT COD","MAIL","nts nag slyly? f" +3683623,74889,2411,3,33,61508.4294967300,0.4294967299,0.4294967304,"N","O","1997-01-08","1997-02-04","1997-01-14","NONE","SHIP","lly regular packages" +3683623,54352,6858,4,35,45722.4294967321,0.4294967299,0.4294967301,"N","O","1997-02-01","1997-02-27","1997-02-26","COLLECT COD","REG AIR","dencies cajole. quickly special foxe" +3692388,29806,9807,1,15,26037.3705477335202398208,0.3705477335202398210,0.3705477335202398216,"N","O","1997-05-05","1997-03-30","1997-05-14","NONE","MAIL","special pl" +3868359,68383,8384,1,36,48649.3705477335202398276,0.3705477335202398212,0.3705477335202398213,"A","F","1992-10-29","1992-10-18","1992-10-30","NONE","RAIL","ts. carefully express ideas run quickly c" +3868359,105649,3180,2,9,14891.4294967372,0.4294967301,0.4294967299,"R","F","1992-10-27","1992-09-27","1992-11-26","DELIVER IN PERSON","MAIL","s wake furiously. platele" +3868359,39769,2273,3,32,54680.4294967328,0.4294967303,0.4294967297,"R","F","1992-11-29","1992-10-13","1992-12-02","TAKE BACK RETURN","REG AIR","c ideas are quickly ironic " +3868359,194361,9400,4,6,8732.4294967312,0.4294967305,0.4294967298,"A","F","1992-11-03","1992-10-05","1992-11-21","COLLECT COD","REG AIR","s are. furiously final foxes are spec" +3916288,88086,8087,1,15,16111.3705477335202398228,0.3705477335202398210,0.3705477335202398215,"N","O","1997-11-11","1997-09-07","1997-12-08","COLLECT COD","TRUCK","e blithely express platelets; requests " +3916288,58604,3615,2,8,12500.4294967376,0.4294967297,0.4294967297,"N","O","1997-10-21","1997-08-30","1997-11-12","DELIVER IN PERSON","TRUCK","s. 
blithely unusual packages above the car" +3916288,154756,2302,3,30,54322.4294967346,0.4294967306,0.4294967296,"N","O","1997-07-23","1997-09-21","1997-08-15","DELIVER IN PERSON","TRUCK","al instruct" +3916288,178978,4013,4,6,12341.4294967378,0.4294967305,0.4294967297,"N","O","1997-11-17","1997-10-10","1997-12-01","DELIVER IN PERSON","REG AIR","uests. packages nag furiously ironic t" +3916288,77571,2586,5,7,10839.4294967395,0.4294967301,0.4294967296,"N","O","1997-10-15","1997-08-19","1997-11-02","DELIVER IN PERSON","MAIL","ronic accounts are. blithely regu" +3942656,177966,7967,1,10,20439.3705477335202398268,0.3705477335202398213,0.3705477335202398211,"R","F","1992-10-22","1992-10-01","1992-11-04","DELIVER IN PERSON","REG AIR","quests. furiously unusual" +3942656,70612,8134,2,45,71217.4294967341,0.4294967303,0.4294967301,"R","F","1992-10-15","1992-09-25","1992-10-25","TAKE BACK RETURN","RAIL","s integrate ironic pinto bea" +3942656,173519,3520,3,41,65292.4294967387,0.4294967305,0.4294967297,"A","F","1992-10-26","1992-10-08","1992-11-21","NONE","RAIL","sts impress furiously. carefully silent" +3942656,120888,8425,4,50,95444.4294967296,0.4294967297,0.4294967296,"A","F","1992-12-14","1992-10-18","1992-12-16","DELIVER IN PERSON","REG AIR","ctions. regular dep" +3942656,148816,8817,5,50,93240.4294967346,0.4294967306,0.4294967296,"A","F","1992-09-05","1992-09-27","1992-09-15","DELIVER IN PERSON","TRUCK","ns detect; slyly final theodolites betw" +3951331,135093,7607,1,18,20305.3705477335202398270,0.3705477335202398209,0.3705477335202398213,"N","O","1998-02-08","1998-02-07","1998-03-10","NONE","SHIP"," furiously from the slyly care" +3951331,93639,3640,2,27,44081.4294967297,0.4294967304,0.4294967303,"N","O","1998-02-21","1998-01-14","1998-03-16","COLLECT COD","TRUCK","lyly express platelets ha" +3951331,183141,3142,3,10,12241.4294967336,0.4294967304,0.4294967299,"N","O","1998-01-21","1998-01-11","1998-02-03","DELIVER IN PERSON","TRUCK","usly speci" +3986496,14260,4261,1,32,37576.3705477335202398240,0.3705477335202398210,0.3705477335202398212,"N","O","1997-06-04","1997-04-01","1997-06-12","DELIVER IN PERSON","FOB","olites. packages bel" +3986496,57360,2371,2,21,27664.4294967352,0.4294967299,0.4294967299,"N","O","1997-04-10","1997-05-14","1997-04-18","COLLECT COD","REG AIR","lar dependencies" +3986496,36515,4025,3,49,71123.4294967395,0.4294967297,0.4294967304,"N","O","1997-03-06","1997-04-13","1997-03-09","NONE","AIR","y regular foxes. 
furiously e" +3986496,94207,6717,4,30,36036.4294967296,0.4294967299,0.4294967302,"N","O","1997-04-03","1997-04-04","1997-04-30","DELIVER IN PERSON","FOB","slyly ironic foxes " +3986496,128461,8462,5,28,41704.4294967384,0.4294967300,0.4294967303,"N","O","1997-06-09","1997-05-09","1997-06-27","TAKE BACK RETURN","SHIP","y special t" +3986496,41797,6806,6,14,24343.4294967302,0.4294967306,0.4294967301,"N","O","1997-03-28","1997-04-01","1997-04-02","NONE","REG AIR","ly according to the carefull" +3986496,76510,9018,7,44,65406.4294967340,0.4294967300,0.4294967304,"N","O","1997-03-08","1997-03-31","1997-04-02","TAKE BACK RETURN","TRUCK","ress requests solve b" +3995111,189114,1633,1,25,30077.3705477335202398283,0.3705477335202398216,0.3705477335202398215,"R","F","1994-03-01","1994-03-15","1994-03-03","TAKE BACK RETURN","SHIP","to the closely express " +3995111,29201,4206,2,34,38426.4294967376,0.4294967305,0.4294967296,"A","F","1994-06-13","1994-03-18","1994-06-30","TAKE BACK RETURN","RAIL","er express accoun" +3995111,177916,2951,3,14,27914.4294967370,0.4294967301,0.4294967299,"A","F","1994-02-17","1994-05-07","1994-03-01","TAKE BACK RETURN","RAIL"," furiously regular ideas. furiously re" +3995111,24407,4408,4,16,21302.4294967336,0.4294967298,0.4294967297,"A","F","1994-04-06","1994-04-03","1994-04-13","DELIVER IN PERSON","REG AIR","ly ironic the" +3995111,192174,7213,5,13,16460.4294967317,0.4294967298,0.4294967303,"R","F","1994-05-14","1994-04-26","1994-06-09","NONE","MAIL","ake slyly aga" +3995111,59999,7515,6,5,9794.4294967391,0.4294967302,0.4294967303,"A","F","1994-05-09","1994-03-19","1994-05-10","COLLECT COD","REG AIR"," quickly carefully silent pinto beans." +4141668,22659,7664,1,23,36377.3705477335202398303,0.3705477335202398211,0.3705477335202398210,"A","F","1995-05-22","1995-05-19","1995-06-08","COLLECT COD","AIR","ly even pearls boost fur" +4141668,27438,4945,2,22,30039.4294967342,0.4294967302,0.4294967297,"R","F","1995-05-17","1995-05-29","1995-06-10","COLLECT COD","SHIP","arefully special" +4141668,137826,7827,3,41,76416.4294967358,0.4294967304,0.4294967299,"R","F","1995-04-04","1995-06-13","1995-04-05","COLLECT COD","TRUCK","counts nod bl" +4141668,34541,4542,4,22,32461.4294967384,0.4294967306,0.4294967304,"A","F","1995-05-20","1995-05-26","1995-06-05","NONE","SHIP","n patterns. final,unusual " +4163013,199414,1934,1,4,6053.3705477335202398272,0.3705477335202398212,0.3705477335202398213,"R","F","1993-12-10","1994-01-14","1993-12-18","TAKE BACK RETURN","SHIP","heodolites haggle. final packages " +4163013,150895,896,2,41,79781.4294967345,0.4294967306,0.4294967300,"R","F","1994-02-19","1994-02-05","1994-03-12","DELIVER IN PERSON","MAIL"," carefully quickly even packages. final " +4208674,93319,8338,1,27,35432.3705477335202398245,0.3705477335202398210,0.3705477335202398215,"N","F","1995-05-27","1995-07-11","1995-06-24","COLLECT COD","RAIL","ly express pinto beans boost slyly. 
" +4208674,5706,8207,2,25,40292.4294967346,0.4294967300,0.4294967298,"N","O","1995-07-02","1995-06-20","1995-07-18","NONE","MAIL","serve carefully ironic dependen" +4208674,84394,1919,3,10,13783.4294967386,0.4294967298,0.4294967300,"R","F","1995-05-24","1995-07-23","1995-06-06","NONE","REG AIR","lar,even deposits wak" +4208674,2853,354,4,41,71989.4294967381,0.4294967302,0.4294967296,"N","F","1995-06-02","1995-06-20","1995-06-30","DELIVER IN PERSON","MAIL","carefully special deposits" +4225824,59760,7276,1,10,17197.3705477335202398268,0.3705477335202398214,0.3705477335202398211,"N","O","1995-10-31","1995-09-17","1995-11-18","DELIVER IN PERSON","REG AIR","ly ironic accounts about " +4225824,183436,3437,2,22,33427.4294967342,0.4294967302,0.4294967302,"N","O","1995-11-11","1995-09-05","1995-12-02","DELIVER IN PERSON","SHIP","special theod" +4225824,2683,184,3,1,1585.4294967364,0.4294967298,0.4294967301,"N","O","1995-10-07","1995-09-09","1995-10-16","COLLECT COD","MAIL","luffily closely" +4225824,88129,638,4,50,55856.4294967296,0.4294967297,0.4294967303,"N","O","1995-08-09","1995-08-26","1995-08-27","TAKE BACK RETURN","FOB","s are quickly even accounts. blithely " +4243142,3062,563,1,37,35707.3705477335202398230,0.3705477335202398213,0.3705477335202398212,"N","O","1996-02-05","1996-02-17","1996-02-13","NONE","FOB","d dependencies snooze fluffily sly" +4243142,48863,8864,2,20,36237.4294967316,0.4294967298,0.4294967301,"N","O","1996-01-09","1995-12-29","1996-01-23","TAKE BACK RETURN","MAIL"," slyly. furiously express id" +4243142,33399,3400,3,44,58625.4294967312,0.4294967301,0.4294967303,"N","O","1996-03-19","1996-02-02","1996-04-17","DELIVER IN PERSON","AIR","ng the quickly fi" +4273923,127174,9687,1,28,33632.3705477335202398284,0.3705477335202398217,0.3705477335202398215,"N","O","1997-07-04","1997-06-10","1997-07-25","NONE","REG AIR","asymptotes try to nag fluffily acros" +4273923,12564,5066,2,42,62015.4294967348,0.4294967299,0.4294967301,"N","O","1997-06-06","1997-06-16","1997-06-13","COLLECT COD","AIR","ly even packages: ironic as" +4320612,141370,6399,1,24,33872.3705477335202398296,0.3705477335202398214,0.3705477335202398208,"A","F","1992-12-03","1992-11-22","1992-12-14","TAKE BACK RETURN","MAIL","uickly bold theodolites" +4320612,193219,3220,2,17,22307.4294967353,0.4294967297,0.4294967298,"A","F","1992-11-23","1992-10-21","1992-11-28","DELIVER IN PERSON","SHIP"," regular asymptotes! id" +4320612,19301,1803,3,48,58574.4294967336,0.4294967303,0.4294967297,"A","F","1992-10-22","1992-10-22","1992-11-13","NONE","SHIP"," boost carefully" +4320612,176957,4509,4,37,75256.4294967311,0.4294967302,0.4294967300,"A","F","1992-12-24","1992-12-13","1993-01-14","DELIVER IN PERSON","MAIL","counts. furiously pending pinto beans doub" +4328998,89415,9416,1,38,53367.3705477335202398266,0.3705477335202398218,0.3705477335202398210,"A","F","1992-05-14","1992-05-26","1992-05-31","TAKE BACK RETURN","MAIL"," final foxes detect unus" +4328998,195864,8384,2,7,13719.4294967298,0.4294967304,0.4294967298,"R","F","1992-06-11","1992-05-31","1992-06-19","NONE","AIR"," close pac" +4328998,71586,9108,3,14,21806.4294967308,0.4294967297,0.4294967299,"A","F","1992-07-23","1992-06-19","1992-08-13","TAKE BACK RETURN","TRUCK","fluffily regular packages hagg" +4328998,142508,7537,4,23,35661.4294967346,0.4294967299,0.4294967300,"R","F","1992-04-05","1992-06-23","1992-04-17","TAKE BACK RETURN","MAIL","ully special deposits. 
bold ideas s" +4328998,119854,9855,5,39,73080.4294967311,0.4294967297,0.4294967303,"R","F","1992-06-16","1992-05-21","1992-07-08","NONE","MAIL","kages are. ev" +4328998,178840,1358,6,47,90185.4294967344,0.4294967305,0.4294967301,"A","F","1992-06-01","1992-05-08","1992-06-24","DELIVER IN PERSON","FOB","old instructions. slyly special " +4328998,160194,2711,7,32,40134.4294967304,0.4294967304,0.4294967301,"R","F","1992-06-11","1992-04-29","1992-06-29","TAKE BACK RETURN","AIR","nal theodolites. fluffily unusual acc" +4407621,34109,6613,1,5,5215.3705477335202398258,0.3705477335202398209,0.3705477335202398211,"N","O","1995-11-30","1995-11-20","1995-12-27","TAKE BACK RETURN","RAIL","cross the slyly silent accounts." +4407621,55841,852,2,10,17968.4294967336,0.4294967306,0.4294967303,"N","O","1995-12-15","1995-11-04","1996-01-02","TAKE BACK RETURN","FOB"," dependencies cajole" +4407621,106161,3692,3,17,19841.4294967368,0.4294967299,0.4294967300,"N","O","1996-01-07","1995-11-24","1996-02-05","NONE","AIR","ilent packages. bold requests cajole " +4407621,38424,8425,4,40,54496.4294967376,0.4294967305,0.4294967299,"N","O","1995-12-26","1995-12-17","1996-01-07","NONE","AIR","elets! fluffily ironic packages cajole " +4550145,95701,5702,1,47,79744.3705477335202398298,0.3705477335202398210,0.3705477335202398209,"N","O","1995-10-11","1995-08-12","1995-11-02","COLLECT COD","FOB","ending foxes. i" +4550145,44869,9878,2,34,61671.4294967320,0.4294967303,0.4294967303,"N","O","1995-07-01","1995-07-17","1995-07-08","DELIVER IN PERSON","MAIL","e quickly iro" +4616224,72748,270,1,12,20648.3705477335202398296,0.3705477335202398211,0.3705477335202398216,"N","O","1997-03-19","1997-03-10","1997-03-21","TAKE BACK RETURN","SHIP","ounts integrate fluffily " +4659813,85060,2585,1,7,7315.3705477335202398250,0.3705477335202398209,0.3705477335202398211,"R","F","1992-08-27","1992-09-11","1992-08-29","COLLECT COD","FOB","counts. slyly final" +4808192,118792,8793,1,26,47080.3705477335202398262,0.3705477335202398213,0.3705477335202398216,"N","O","1996-07-26","1996-09-07","1996-08-03","TAKE BACK RETURN","REG AIR","inal deposits haggle b" +4808192,121572,9109,2,11,17529.4294967323,0.4294967299,0.4294967297,"N","O","1996-09-15","1996-09-04","1996-09-25","COLLECT COD","MAIL","g,final requests sleep quickly fluffi" +4860421,171296,8848,1,12,16407.3705477335202398256,0.3705477335202398214,0.3705477335202398216,"N","O","1996-01-14","1995-12-31","1996-01-16","COLLECT COD","MAIL","etect furiously about the accounts." +4860421,164298,6815,2,41,55853.4294967385,0.4294967304,0.4294967299,"N","O","1995-12-31","1995-12-21","1996-01-16","COLLECT COD","MAIL","gside of the foxes. slyly ironic ac" +4860421,150892,3408,3,5,9714.4294967341,0.4294967302,0.4294967299,"N","O","1995-11-04","1995-12-13","1995-11-11","TAKE BACK RETURN","SHIP","thely regular packages across " +4860421,32413,7420,4,4,5381.4294967360,0.4294967302,0.4294967298,"N","O","1995-11-08","1995-12-17","1995-11-28","NONE","REG AIR","ly. quickly unusual pinto beans " +4860421,160771,5804,5,39,71439.4294967299,0.4294967304,0.4294967299,"N","O","1996-01-21","1995-12-09","1996-02-18","TAKE BACK RETURN","FOB"," along the regular,ironic theodolites. c" +4860421,63006,525,6,29,28101.4294967296,0.4294967299,0.4294967303,"N","O","1995-12-23","1995-11-28","1996-01-12","DELIVER IN PERSON","AIR","carefully acc" +4860421,140576,577,7,6,9699.4294967338,0.4294967299,0.4294967301,"N","O","1995-12-13","1995-12-01","1996-01-10","NONE","AIR","ss the final foxes. 
fluffily fi" +4960614,5975,976,1,10,18809.3705477335202398278,0.3705477335202398209,0.3705477335202398212,"N","O","1997-12-24","1997-11-08","1997-12-29","COLLECT COD","SHIP","kages. finally ironic " +4960614,14062,9065,2,29,28305.4294967370,0.4294967299,0.4294967299,"N","O","1997-10-20","1997-11-11","1997-11-11","TAKE BACK RETURN","SHIP","osits at the blithely iron" +4960614,128497,3522,3,21,32035.4294967325,0.4294967303,0.4294967298,"N","O","1997-12-15","1997-11-25","1997-12-27","NONE","RAIL","luffily pending accou" +4960614,32629,139,4,8,12492.4294967392,0.4294967296,0.4294967301,"N","O","1998-01-19","1997-12-13","1998-02-10","NONE","TRUCK","ggle blithel" +5090183,123589,3590,1,14,22576.3705477335202398220,0.3705477335202398213,0.3705477335202398212,"N","O","1996-10-08","1996-08-05","1996-10-10","NONE","RAIL","cuses. quickly special requests wake: de" +5090183,150056,7602,2,20,22121.4294967296,0.4294967304,0.4294967302,"N","O","1996-09-17","1996-07-23","1996-09-22","NONE","REG AIR","osits nag slyly across the sl" +5090183,190935,5974,3,18,36466.4294967370,0.4294967304,0.4294967301,"N","O","1996-09-02","1996-07-11","1996-09-07","TAKE BACK RETURN","REG AIR","nic,express theodolites h" +5091364,198182,3221,1,5,6400.3705477335202398298,0.3705477335202398213,0.3705477335202398208,"N","O","1997-01-23","1996-12-16","1997-02-06","NONE","SHIP","ounts haggle about the quickly final" +5091364,41547,9060,2,23,34236.4294967338,0.4294967302,0.4294967300,"N","O","1997-01-03","1997-01-09","1997-02-02","DELIVER IN PERSON","REG AIR"," furiously alon" +5133509,196216,6217,1,28,36741.3705477335202398296,0.3705477335202398210,0.3705477335202398211,"N","O","1996-08-19","1996-09-07","1996-09-18","DELIVER IN PERSON","REG AIR","encies nag quickly regular,special de" +5133509,198707,6265,2,35,63199.4294967346,0.4294967302,0.4294967301,"N","O","1996-08-12","1996-09-24","1996-08-28","COLLECT COD","REG AIR"," haggle furiously acr" +5133509,60989,6002,3,12,23399.4294967372,0.4294967301,0.4294967296,"N","O","1996-09-28","1996-08-07","1996-10-24","NONE","TRUCK","the,final dependencies sleep. f" +5133509,31696,1697,4,33,53713.4294967373,0.4294967303,0.4294967302,"N","O","1996-08-16","1996-09-16","1996-08-30","NONE","SHIP","ites use across the fluffily regula" +5453440,14306,4307,1,14,17084.3705477335202398228,0.3705477335202398208,0.3705477335202398213,"N","O","1997-09-03","1997-09-28","1997-09-08","NONE","TRUCK","uests. finally bold requests are always. s" +5612065,56099,8605,1,22,23211.3705477335202398306,0.3705477335202398212,0.3705477335202398216,"N","O","1997-11-29","1997-11-24","1997-12-19","DELIVER IN PERSON","TRUCK","eodolites nag furiously brave pac" +5612065,48883,8884,2,27,49460.4294967372,0.4294967304,0.4294967301,"N","O","1997-10-11","1997-11-28","1997-10-24","DELIVER IN PERSON","FOB","inal,final ideas hagg" +5805349,161584,1585,1,11,18101.3705477335202398246,0.3705477335202398216,0.3705477335202398215,"N","O","1998-01-28","1998-02-27","1998-02-22","NONE","MAIL","n packages. furiously pending dep" +5805349,131435,1436,2,50,73321.4294967346,0.4294967303,0.4294967303,"N","O","1998-04-16","1998-03-15","1998-05-02","TAKE BACK RETURN","FOB","e among the caref" +5805349,15596,3100,3,19,28720.4294967317,0.4294967300,0.4294967301,"N","O","1998-04-30","1998-03-12","1998-05-05","DELIVER IN PERSON","TRUCK","deas. even foxes are furiously. 
blit" +5805349,109441,4462,4,16,23207.4294967300,0.4294967305,0.4294967298,"N","O","1998-04-25","1998-03-04","1998-04-29","COLLECT COD","TRUCK","ully pending theodolites nag furiously alo" +5805349,170542,543,5,29,46763.4294967362,0.4294967304,0.4294967300,"N","O","1998-04-10","1998-03-02","1998-05-01","TAKE BACK RETURN","SHIP","nt courts. fi" +5805349,94540,9559,6,29,44501.4294967362,0.4294967296,0.4294967302,"N","O","1998-03-16","1998-03-16","1998-03-25","DELIVER IN PERSON","FOB","ress foxes use against the carefully ir" +5805349,118560,8561,7,14,22099.4294967380,0.4294967304,0.4294967304,"N","O","1998-02-12","1998-02-23","1998-02-23","DELIVER IN PERSON","REG AIR","s. blithely express accounts snooze s" +5987111,189145,9146,1,24,29619.3705477335202398244,0.3705477335202398217,0.3705477335202398215,"A","F","1992-10-04","1992-11-21","1992-10-24","TAKE BACK RETURN","MAIL",". slyly iro" +5987111,151273,8819,2,4,5297.4294967304,0.4294967297,0.4294967301,"R","F","1992-11-24","1992-11-01","1992-12-07","NONE","RAIL","lithely unusual instructions wake sly" +5987111,42842,355,3,36,64254.4294967320,0.4294967302,0.4294967301,"A","F","1992-10-01","1992-10-29","1992-10-24","TAKE BACK RETURN","SHIP","ic pinto beans are blithely. i" diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/data/orders.csv b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/data/orders.csv new file mode 100644 index 00000000..7b39c09f --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/data/orders.csv @@ -0,0 +1,86 @@ +36422,10,"O",287837.4294967312,"1997-03-04","3-MEDIUM","Clerk#000000532",0,"e evenly final dependencies. regular, bold pinto beans " +135943,7,"F",314307.4294967342,"1993-06-22","4-NOT SPECIFIED","Clerk#000000685",0,"ies. blithely pending deposits " +164711,4,"F",311722.4294967383,"1992-04-26","3-MEDIUM","Clerk#000000361",0,"accounts among the regular," +224167,5,"O",85170.4294967364,"1996-05-08","5-LOW","Clerk#000000657",0,"s along the carefully special pinto beans cajole carefully even sentim" +287619,5,"O",13620.4294967307,"1996-12-26","5-LOW","Clerk#000000854",0,"r requests must sleep carefully furiously regular tithes? slyly unusual reque" +385825,4,"O",277493.4294967300,"1995-11-01","2-HIGH","Clerk#000000465",0,"e even foxes. regularly even instructions boost carefully along" +430243,2,"F",33082.4294967379,"1994-12-24","4-NOT SPECIFIED","Clerk#000000121",0,"n braids among the final packages cajole pendin" +454791,1,"F",74602.4294967377,"1992-04-19","1-URGENT","Clerk#000000815",0,"uriously ironic deposits wake across the s" +579908,1,"O",54048.4294967322,"1996-12-09","5-LOW","Clerk#000000783",0," asymptotes. regular requests boost blithely. pending gifts according to the" +816323,10,"O",249097.4294967335,"1996-01-23","5-LOW","Clerk#000000693",0,"ts boost furiously pinto beans. regular deposits nag among the express " +859108,10,"O",135312.4294967383,"1996-02-20","3-MEDIUM","Clerk#000000573",0," pinto beans. final, unusual deposits use. final" +883557,10,"O",53855.4294967393,"1998-03-30","2-HIGH","Clerk#000000758",0,"ses are carefully. slyly regular asympto" +895172,10,"O",236806.4294967389,"1995-12-04","1-URGENT","Clerk#000000673",0,"cajole quickly slyly exp" +905633,5,"O",261338.4294967362,"1995-07-05","4-NOT SPECIFIED","Clerk#000000254",0,"arefully ironic attainments cajole slyly furiou" +916775,10,"O",279937.4294967328,"1996-04-26","1-URGENT","Clerk#000000032",0,"express instructions. 
quickly unusual accounts cajole-- carefully i" +1071617,2,"P",221397.4294967331,"1995-03-10","2-HIGH","Clerk#000000408",0,"al, unusual packages. furiously final requests use quickly. f" +1073670,8,"F",76193.4294967379,"1994-05-24","4-NOT SPECIFIED","Clerk#000000710",0,"ly alongside of the slyly ironic theodolites. accoun" +1192231,4,"O",143971.4294967350,"1996-06-03","1-URGENT","Clerk#000000978",0,"osits haggle carefully. carefully final grouches are. blithely si" +1201223,8,"O",138984.4294967370,"1996-01-13","5-LOW","Clerk#000000287",0,"st the carefully final foxes wake carefully according to the" +1226497,4,"F",88317.4294967315,"1993-10-04","1-URGENT","Clerk#000000154",0,"al theodolites. quickly ruthless accounts lose blithely furiously re" +1374019,2,"F",167016.4294967357,"1992-04-05","1-URGENT","Clerk#000000440",0,"lithe packages. acco" +1485505,7,"O",286525.4294967307,"1998-07-24","1-URGENT","Clerk#000000228",0,"posits. final packages wake carefully express ideas. carefully bold deposits a" +1490087,10,"O",227526.4294967353,"1996-07-10","4-NOT SPECIFIED","Clerk#000000953",0,"lly regular deposits. furiously regular instructions thrash" +1590469,4,"O",75928.4294967345,"1997-03-07","2-HIGH","Clerk#000000217",0,"y pending pinto beans. carefully express theodolites cajole " +1755398,4,"O",1131.4294967316,"1997-06-12","5-LOW","Clerk#000000765",0,"s. slyly ironic packa" +1763205,2,"F",16495.4294967329,"1994-08-28","1-URGENT","Clerk#000000450",0,"ven requests. blithely express excuses nag or" +1774689,10,"F",13822.4294967357,"1993-07-08","3-MEDIUM","Clerk#000000702",0," deposits are fluffily quick" +1842406,2,"O",174291.4294967337,"1996-08-05","1-URGENT","Clerk#000000521",0,"s. excuses lose. pen" +1859778,8,"O",263567.4294967333,"1998-02-04","3-MEDIUM","Clerk#000000731",0,"counts boost slyly. express pinto beans use. furious" +1894087,7,"F",36515.4294967351,"1994-02-20","1-URGENT","Clerk#000000343",0," bold, even request" +1925447,8,"O",134464.4294967321,"1997-02-19","4-NOT SPECIFIED","Clerk#000000348",0,"nts snooze furiously according to the carefully bold ideas. sometimes regular" +1944711,4,"P",104934.4294967361,"1995-05-06","1-URGENT","Clerk#000000580",0,"deas wake. slyly silent packa" +1953441,4,"O",235621.4294967379,"1996-06-06","2-HIGH","Clerk#000000288",0,"iers are. quickly final instructi" +1959075,7,"F",192318.4294967303,"1994-12-23","1-URGENT","Clerk#000000590",0,"long the regular instructions. bli" +1978756,4,"O",314671.4294967378,"1996-08-02","3-MEDIUM","Clerk#000000661",0,"es. blithely special theodolites cajole slyly carefully regular asymp" +2014848,7,"O",181378.4294967333,"1997-01-19","5-LOW","Clerk#000000333",0,"lets use pending accounts. qu" +2096544,7,"F",185203.4294967310,"1992-04-17","1-URGENT","Clerk#000000964",0,"ts. final, silent theodoli" +2126688,10,"F",50998.4294967299,"1993-08-15","4-NOT SPECIFIED","Clerk#000000207",0,"r, silent accounts haggle o" +2415204,8,"F",184908.4294967388,"1994-11-03","5-LOW","Clerk#000000368",0,"inal requests are final, bold dolph" +2459619,4,"O",46227.4294967390,"1996-01-03","5-LOW","Clerk#000000917",0,"iously ironic pinto beans sleep furiously across the quickly unusual notornis." +2630562,5,"F",324835.4294967379,"1993-06-27","4-NOT SPECIFIED","Clerk#000000966",0,"carefully final deposits. quickly silent pains are among the careful" +2765152,4,"O",8332.4294967329,"1996-08-11","4-NOT SPECIFIED","Clerk#000000618",0,"onic packages. 
slyly express requests" +2917345,10,"O",52546.4294967333,"1995-10-01","4-NOT SPECIFIED","Clerk#000000983",0,"ly regular accounts. special packages haggle furio" +2943521,7,"O",31698.4294967331,"1998-02-18","1-URGENT","Clerk#000000675",0,"kly against the qui" +2986913,4,"F",40347.4294967344,"1994-06-10","3-MEDIUM","Clerk#000000633",0,",bold accounts use slyly even excuses. pending req" +2992930,2,"F",103297.4294967364,"1994-05-21","4-NOT SPECIFIED","Clerk#000000913",0,"owly slyly final platelets. ironic instructions wake carefully unusual plate" +3038880,7,"O",327615.4294967393,"1995-09-06","2-HIGH","Clerk#000000458",0,"y careful ideas. final, regular theodolites haggle. quic" +3069221,10,"O",26861.4294967341,"1996-10-29","3-MEDIUM","Clerk#000000005",0,"ess requests. carefully si" +3211909,7,"F",187396.4294967392,"1993-06-29","2-HIGH","Clerk#000000918",0," slyly according to the even theodolites. slyly ironic requests nag carefu" +3251169,4,"O",225294.4294967322,"1996-01-06","5-LOW","Clerk#000000310",0,"arefully final packages boost against the furiously" +3318789,7,"F",249621.4294967378,"1992-04-30","3-MEDIUM","Clerk#000000331",0,"ly blithely regular deposits. furiously regular accounts s" +3354726,10,"O",158148.4294967326,"1998-01-10","3-MEDIUM","Clerk#000000427",0,"ggle blithely ironic notornis. carefully bold deposits use iro" +3421092,4,"O",181618.4294967371,"1998-05-16","3-MEDIUM","Clerk#000000762",0,"refully even pinto beans. furiously" +3431909,7,"O",151988.4294967306,"1997-02-04","2-HIGH","Clerk#000000744",0,"ose carefully. slyly unusual ins" +3487745,10,"O",193145.4294967334,"1996-09-26","1-URGENT","Clerk#000000230",0,"uctions. pending dependencies can boost regular attainments-- " +3580034,10,"F",229817.4294967299,"1992-07-09","4-NOT SPECIFIED","Clerk#000000118",0," slyly pending accounts after the sly" +3683623,4,"O",152929.4294967376,"1996-11-29","3-MEDIUM","Clerk#000000652",0,"lve. furiously silent fret" +3692388,8,"O",27557.4294967352,"1997-02-11","4-NOT SPECIFIED","Clerk#000000060",0,"s packages are slyly around the regula" +3868359,1,"F",123076.4294967380,"1992-08-22","5-LOW","Clerk#000000536",0,". furiously bold asymptotes are instructions. quickly ironic dep" +3916288,10,"O",99925.4294967329,"1997-07-20","4-NOT SPECIFIED","Clerk#000000272",0,"t integrate. carefully regular theodolites sleep c" +3942656,10,"F",327960.4294967364,"1992-08-16","3-MEDIUM","Clerk#000000954",0,"jole from the slyly bold deposits. quickly expres" +3951331,4,"O",76100.4294967391,"1997-11-23","3-MEDIUM","Clerk#000000341",0,"ly express platelets above the carefully" +3986496,2,"O",312692.4294967318,"1997-02-22","1-URGENT","Clerk#000000768",0,". theodolites nag fluffily above the blithely iron" +3995111,7,"F",140088.4294967347,"1994-02-13","3-MEDIUM","Clerk#000000724",0,"ts. slyly ironic requests nag blithely. instruct" +4141668,10,"F",168477.4294967308,"1995-03-24","1-URGENT","Clerk#000000849",0," dependencies. blithely pending accounts solve carefully furiously ex" +4163013,7,"F",80777.4294967349,"1993-11-14","3-MEDIUM","Clerk#000000392",0,"t slyly. furiously silent packag" +4208674,8,"P",158327.4294967374,"1995-05-02","3-MEDIUM","Clerk#000000657",0,"riously-- final theodolites int" +4225824,8,"O",110757.4294967369,"1995-07-21","4-NOT SPECIFIED","Clerk#000000060",0,"furiously final packages print slyly express accounts. 
r" +4243142,10,"O",132159.4294967322,"1995-11-20","3-MEDIUM","Clerk#000000004",0,"ironic foxes must haggle blithely " +4273923,1,"O",95911.4294967297,"1997-03-23","3-MEDIUM","Clerk#000000381",0,"ep fluffily along the carefull" +4320612,4,"F",182956.4294967297,"1992-09-20","2-HIGH","Clerk#000000994",0," silently ironic theodolites are around the r" +4328998,7,"F",322431.4294967381,"1992-03-25","4-NOT SPECIFIED","Clerk#000000521",0,"nto beans according to the regular accounts cajole per" +4407621,10,"O",93717.4294967391,"1995-09-26","5-LOW","Clerk#000000354",0,"ly pending notornis sleep furiously about the i" +4550145,8,"O",140300.4294967350,"1995-06-13","3-MEDIUM","Clerk#000000554",0,"ly fluffy requests. frets affix courts. packages play above the carefully fin" +4616224,10,"O",21631.4294967372,"1997-01-22","5-LOW","Clerk#000000818",0,"into beans haggle c" +4659813,8,"F",7459.4294967348,"1992-07-26","4-NOT SPECIFIED","Clerk#000000133",0,"ely regular accounts haggle carefully regular pains. ironi" +4808192,1,"O",65478.4294967301,"1996-06-29","2-HIGH","Clerk#000000473",0,"eposits use against the express accounts. carefully regular ideas sleep blithe" +4860421,7,"O",190889.4294967387,"1995-10-24","5-LOW","Clerk#000000572",0," express pinto beans. requests believe slyly ex" +4960614,4,"O",91152.4294967393,"1997-10-03","4-NOT SPECIFIED","Clerk#000000706",0,"ithely final dependencies cajole blithely. express instructions are slyly a" +5090183,7,"O",79104.4294967342,"1996-06-10","2-HIGH","Clerk#000000046",0,"as. furiously bold requests according to the quickly ironic packages c" +5091364,8,"O",39550.4294967332,"1996-11-12","5-LOW","Clerk#000000894",0,"mptotes after the slyly final asymptotes nag quickly before the carefully ex" +5133509,1,"O",174645.4294967390,"1996-07-01","1-URGENT","Clerk#000000463",0,"al accounts could have to cajole furiously even" +5453440,4,"O",17938.4294967337,"1997-07-12","2-HIGH","Clerk#000000360",0,"s sleep idly foxes. final accounts x-ray slyly. slyly even platelets h" +5612065,4,"O",71845.4294967322,"1997-09-07","3-MEDIUM","Clerk#000000999",0,"quests boost fluffily. blithely f" +5805349,8,"O",255145.4294967348,"1998-01-01","1-URGENT","Clerk#000000956",0,"ites doze special, express deposits. " +5987111,8,"F",97765.4294967354,"1992-09-03","2-HIGH","Clerk#000000822",0,"nding pinto beans. 
carefully pending packages sleep quickly regular dolphi" diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/docker-compose.yml b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/docker-compose.yml new file mode 100644 index 00000000..6fc28441 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/docker-compose.yml @@ -0,0 +1,110 @@ +version: "3" +services: + analytics: + image: ghcr.io/scalar-labs/scalardb-analytics-postgresql:3.10.3 + ports: + - "5432:5432" + volumes: + - analytics-data:/var/lib/postgresql/data + - ./scalardb.properties:/etc/scalardb.properties + environment: + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=postgres + - POSTGRES_DB=test + networks: + - scalar-network + + backend-postgres: + image: postgres:15.1 + ports: + - "5434:5432" + volumes: + - backend-postgres-data:/var/lib/postgresql/data + environment: + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=postgres + - POSTGRES_DB=test + networks: + - scalar-network + + backend-cassandra: + image: cassandra:3.11 + ports: + - "9042:9042" + volumes: + - backend-cassandra-data:/var/lib/cassandra + environment: + - CASSANDRA_DC=dc1 + - CASSANDRA_ENDPOINT_SNITCH=GossipingPropertyFileSnitch + networks: + - scalar-network + + backend-dynamodb: + image: amazon/dynamodb-local:1.21.0 + ports: + - "8000:8000" + command: + [ + "-jar", + "DynamoDBLocal.jar", + "-sharedDb", + "-dbPath", + "/home/dynamodblocal", + "-optimizeDbBeforeStartup", + ] + volumes: + - backend-dynamodb-data:/home/dynamodblocal + networks: + - scalar-network + + schema-loader: + image: ghcr.io/scalar-labs/scalardb-schema-loader:3.10.2 + volumes: + - ./scalardb.properties:/etc/scalardb.properties + - ./schema.json:/etc/schema.json + networks: + - scalar-network + profiles: + - dev + depends_on: + - backend-postgres + - backend-cassandra + - backend-dynamodb + + schema-importer: + image: ghcr.io/scalar-labs/scalardb-analytics-postgresql-schema-importer:3.10.3 + volumes: + - ./scalardb.properties:/etc/scalardb.properties + networks: + - scalar-network + profiles: + - dev + depends_on: + - analytics + + sample-data-loader: + build: + context: sample-data-loader + dockerfile: Dockerfile + volumes: + - ./scalardb.properties:/etc/scalardb.properties + - ./data:/data + working_dir: /sample-data-loader + networks: + - scalar-network + profiles: + - dev + depends_on: + - backend-postgres + - backend-cassandra + - backend-dynamodb + command: ["java", "-jar", "/app.jar"] + +volumes: + analytics-data: {} + backend-postgres-data: {} + backend-cassandra-data: {} + backend-dynamodb-data: {} + +networks: + scalar-network: {} diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/.gitattributes b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/.gitattributes new file mode 100644 index 00000000..097f9f98 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/.gitattributes @@ -0,0 +1,9 @@ +# +# https://help.github.com/articles/dealing-with-line-endings/ +# +# Linux start script should use lf +/gradlew text eol=lf + +# These are Windows script files and should use crlf +*.bat text eol=crlf + diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/.gitignore b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/.gitignore new file mode 100644 index 00000000..1b6985c0 --- /dev/null +++ 
b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/.gitignore @@ -0,0 +1,5 @@ +# Ignore Gradle project-specific cache directory +.gradle + +# Ignore Gradle build output directory +build diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/Dockerfile b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/Dockerfile new file mode 100644 index 00000000..fe0e9486 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/Dockerfile @@ -0,0 +1,10 @@ +FROM eclipse-temurin:17-jdk-jammy AS builder + +COPY . /app +WORKDIR /app + +RUN ./gradlew shadowJar + +FROM eclipse-temurin:17-jre-jammy + +COPY --from=builder /app/build/libs/sample-data-loader-all.jar /app.jar diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/build.gradle.kts b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/build.gradle.kts new file mode 100644 index 00000000..494a7eb9 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/build.gradle.kts @@ -0,0 +1,33 @@ +plugins { + application + id("com.github.johnrengelman.shadow") version "7.1.2" + id("com.diffplug.spotless") version "6.24.0" +} + +repositories { + mavenCentral() +} + +dependencies { + implementation("com.scalar-labs:scalardb:3.10.2") + implementation("org.apache.commons:commons-csv:1.10.0") +} + +java { + toolchain { + languageVersion.set(JavaLanguageVersion.of(17)) + } +} + +application { + mainClass.set("sample.data.Main") +} + +spotless { + java { + target("src/*/java/**/*.java") + importOrder() + removeUnusedImports() + googleJavaFormat("1.17.0") + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/gradle/libs.versions.toml b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/gradle/libs.versions.toml new file mode 100644 index 00000000..637211cd --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/gradle/libs.versions.toml @@ -0,0 +1,10 @@ +# This file was generated by the Gradle 'init' task. 
+# https://docs.gradle.org/current/userguide/platforms.html#sub::toml-dependencies-format + +[versions] +guava = "32.1.2-jre" +junit = "4.13.2" + +[libraries] +guava = { module = "com.google.guava:guava", version.ref = "guava" } +junit = { module = "junit:junit", version.ref = "junit" } diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/gradle/wrapper/gradle-wrapper.jar b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 00000000..d64cd491 Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/gradle/wrapper/gradle-wrapper.jar differ diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/gradle/wrapper/gradle-wrapper.properties b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000..1af9e093 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,7 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-8.5-bin.zip +networkTimeout=10000 +validateDistributionUrl=true +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/gradlew b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/gradlew new file mode 100755 index 00000000..1aa94a42 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/gradlew @@ -0,0 +1,249 @@ +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. 
+# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# +############################################################################## + +# Attempt to set APP_HOME + +# Resolve links: $0 may be a link +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. 
+ # shellcheck disable=SC2039,SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. + +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + +exec "$JAVACMD" "$@" diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/gradlew.bat b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/gradlew.bat new file mode 100644 index 00000000..93e3f59f --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/gradlew.bat @@ -0,0 +1,92 @@ +@rem +@rem Copyright 2015 the original author or authors. 
+@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%"=="" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if %ERRORLEVEL% equ 0 goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if %ERRORLEVEL% equ 0 goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/settings.gradle.kts b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/settings.gradle.kts new file mode 100644 index 00000000..e2cbb489 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/settings.gradle.kts @@ -0,0 +1,5 @@ +plugins { + id("org.gradle.toolchains.foojay-resolver-convention") version "0.7.0" +} + +rootProject.name = "sample-data-loader" diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/src/main/java/sample/data/Loader.java b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/src/main/java/sample/data/Loader.java new file mode 100644 index 00000000..78497a8d --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/src/main/java/sample/data/Loader.java @@ -0,0 +1,173 @@ +package sample.data; + +import com.scalar.db.api.DistributedTransaction; +import com.scalar.db.api.DistributedTransactionManager; +import com.scalar.db.api.Put; +import com.scalar.db.exception.transaction.TransactionException; +import com.scalar.db.io.Key; +import com.scalar.db.service.TransactionFactory; +import java.io.BufferedReader; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.function.Function; +import org.apache.commons.csv.CSVFormat; +import org.apache.commons.csv.CSVRecord; + +public class Loader implements AutoCloseable { + + private static final String[] CUSTOMER_COLUMNS = { + "c_custkey", + "c_name", + "c_address", + "c_nationkey", + "c_phone", + "c_acctbal", + "c_mktsegment", + "c_comment" + }; + + private static final String[] ORDERS_COLUMNS = { + "o_orderkey", + "o_custkey", + "o_orderstatus", + "o_totalprice", + "o_orderdate", + "o_orderpriority", + "o_clerk", + "o_shippriority", + "o_comment" + }; + + private static final String[] LINEITEM_COLUMNS = { + "l_orderkey", + "l_partkey", + "l_suppkey", + "l_linenumber", + "l_quantity", + "l_extendedprice", + "l_discount", + "l_tax", + "l_returnflag", + "l_linestatus", + "l_shipdate", + "l_commitdate", + "l_receiptdate", + "l_shipinstruct", + "l_shipmode", + "l_comment" + }; + + private final DistributedTransactionManager manager; + + public Loader() throws IOException { + TransactionFactory factory = TransactionFactory.create("/etc/scalardb.properties"); + manager = factory.getTransactionManager(); + } + + public void close() { + manager.close(); + } + + public void load() throws TransactionException, IOException { + loadData(this.manager, "/data/customer.csv", CUSTOMER_COLUMNS, this::buildPutCustomer); + + loadData(this.manager, "/data/orders.csv", ORDERS_COLUMNS, this::buildPutOrders); + + loadData(this.manager, "/data/lineitem.csv", LINEITEM_COLUMNS, this::buildPutLineitem); + } + + private Put buildPutCustomer(CSVRecord record) { + return Put.newBuilder() + .namespace("dynamons") + .table("customer") + .partitionKey(Key.ofInt("c_custkey", intCol(record, "c_custkey"))) + .textValue("c_name", stringCol(record, "c_name")) + .textValue("c_address", stringCol(record, "c_address")) + .intValue("c_nationkey", intCol(record, "c_nationkey")) + .textValue("c_phone", stringCol(record, "c_phone")) + 
.doubleValue("c_acctbal", doubleCol(record, "c_acctbal")) + .textValue("c_mktsegment", stringCol(record, "c_mktsegment")) + .textValue("c_comment", stringCol(record, "c_comment")) + .build(); + } + + private Put buildPutOrders(CSVRecord record) { + return Put.newBuilder() + .namespace("postgresns") + .table("orders") + .partitionKey(Key.ofInt("o_orderkey", intCol(record, "o_orderkey"))) + .intValue("o_custkey", intCol(record, "o_custkey")) + .textValue("o_orderstatus", stringCol(record, "o_orderstatus")) + .doubleValue("o_totalprice", doubleCol(record, "o_totalprice")) + .textValue("o_orderdate", stringCol(record, "o_orderdate")) + .textValue("o_orderpriority", stringCol(record, "o_orderpriority")) + .textValue("o_clerk", stringCol(record, "o_clerk")) + .intValue("o_shippriority", intCol(record, "o_shippriority")) + .textValue("o_comment", stringCol(record, "o_comment")) + .build(); + } + + private Put buildPutLineitem(CSVRecord record) { + return Put.newBuilder() + .namespace("cassandrans") + .table("lineitem") + .partitionKey( + Key.of( + "l_orderkey", + intCol(record, "l_orderkey"), + "l_linenumber", + intCol(record, "l_linenumber"))) + .intValue("l_partkey", intCol(record, "l_partkey")) + .intValue("l_suppkey", intCol(record, "l_suppkey")) + .intValue("l_quantity", intCol(record, "l_quantity")) + .doubleValue("l_extendedprice", doubleCol(record, "l_extendedprice")) + .doubleValue("l_discount", doubleCol(record, "l_discount")) + .doubleValue("l_tax", doubleCol(record, "l_tax")) + .textValue("l_returnflag", stringCol(record, "l_returnflag")) + .textValue("l_linestatus", stringCol(record, "l_linestatus")) + .textValue("l_shipdate", stringCol(record, "l_shipdate")) + .textValue("l_commitdate", stringCol(record, "l_commitdate")) + .textValue("l_receiptdate", stringCol(record, "l_receiptdate")) + .textValue("l_shipinstruct", stringCol(record, "l_shipinstruct")) + .textValue("l_shipmode", stringCol(record, "l_shipmode")) + .textValue("l_comment", stringCol(record, "l_comment")) + .build(); + } + + private void loadData( + DistributedTransactionManager manager, + String path, + String[] columnHeader, + Function<CSVRecord, Put> putFunction) + throws TransactionException, IOException { + DistributedTransaction transaction = null; + try (BufferedReader reader = Files.newBufferedReader(Path.of(path))) { + Iterable<CSVRecord> records = + CSVFormat.Builder.create().setHeader(columnHeader).build().parse(reader); + transaction = manager.start(); + for (CSVRecord record : records) { + Put put = putFunction.apply(record); + transaction.put(put); + } + transaction.commit(); + } catch (TransactionException e) { + if (transaction != null) { + transaction.abort(); + } + throw e; + } + } + + private String stringCol(CSVRecord record, String column) { + return record.get(column); + } + + private int intCol(CSVRecord record, String column) { + return Integer.parseInt(record.get(column)); + } + + private double doubleCol(CSVRecord record, String column) { + return Double.parseDouble(record.get(column)); + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/src/main/java/sample/data/Main.java b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/src/main/java/sample/data/Main.java new file mode 100644 index 00000000..a211f0cd --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/sample-data-loader/src/main/java/sample/data/Main.java @@ -0,0 +1,12 @@ +package sample.data; + +import 
com.scalar.db.exception.transaction.TransactionException; +import java.io.IOException; + +public class Main { + public static void main(String[] args) throws IOException, TransactionException { + try (Loader loader = new Loader()) { + loader.load(); + } + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/scalardb.properties b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/scalardb.properties new file mode 100644 index 00000000..bf68de4b --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/scalardb.properties @@ -0,0 +1,30 @@ +scalar.db.storage=multi-storage +scalar.db.multi_storage.storages=cassandra,postgres,dynamodb + +scalar.db.multi_storage.storages.cassandra.storage=cassandra +scalar.db.multi_storage.storages.cassandra.contact_points=backend-cassandra +scalar.db.multi_storage.storages.cassandra.contact_port=9042 +scalar.db.multi_storage.storages.cassandra.username=cassandra +scalar.db.multi_storage.storages.cassandra.password=cassandra + +scalar.db.multi_storage.storages.postgres.storage=jdbc +scalar.db.multi_storage.storages.postgres.contact_points=jdbc:postgresql://backend-postgres:5432/test +scalar.db.multi_storage.storages.postgres.username=postgres +scalar.db.multi_storage.storages.postgres.password=postgres +scalar.db.multi_storage.storages.postgres.jdbc.connection_pool.min_idle=5 +scalar.db.multi_storage.storages.postgres.jdbc.connection_pool.max_idle=10 +scalar.db.multi_storage.storages.postgres.jdbc.connection_pool.max_total=25 + +scalar.db.multi_storage.storages.dynamodb.contact_points=ap-northeast-1 +scalar.db.multi_storage.storages.dynamodb.username=access_key_id +scalar.db.multi_storage.storages.dynamodb.password=secret_access_key +scalar.db.multi_storage.storages.dynamodb.storage=dynamo +scalar.db.multi_storage.storages.dynamodb.dynamo.endpoint_override=http://backend-dynamodb:8000 +scalar.db.multi_storage.storages.dynamodb.dynamo.table_metadata.namespace=table_metadata +scalar.db.multi_storage.storages.dynamodb.dynamo.namespace.prefix=scalar_ + +scalar.db.multi_storage.namespace_mapping=cassandrans:cassandra,postgresns:postgres,dynamons:dynamodb + +scalar.db.multi_storage.default_storage=cassandra + +scalar.db.sql.connection_mode=direct diff --git a/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/schema.json b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/schema.json new file mode 100644 index 00000000..c90e7630 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-analytics-postgresql-sample/schema.json @@ -0,0 +1,60 @@ +{ + "dynamons.customer": { + "transaction": true, + "partition-key": [ + "c_custkey" + ], + "columns": { + "c_custkey": "INT", + "c_name": "TEXT", + "c_address": "TEXT", + "c_nationkey": "INT", + "c_phone": "TEXT", + "c_acctbal": "DOUBLE", + "c_mktsegment": "TEXT", + "c_comment": "TEXT" + } + }, + "postgresns.orders": { + "transaction": true, + "partition-key": [ + "o_orderkey" + ], + "columns": { + "o_orderkey": "INT", + "o_custkey": "INT", + "o_orderstatus": "TEXT", + "o_totalprice": "DOUBLE", + "o_orderdate": "TEXT", + "o_orderpriority": "TEXT", + "o_clerk": "TEXT", + "o_shippriority": "INT", + "o_comment": "TEXT" + } + }, + "cassandrans.lineitem": { + "transaction": true, + "partition-key": [ + "l_orderkey", + "l_linenumber" + ], + "columns": { + "l_orderkey": "INT", + "l_partkey": "INT", + "l_suppkey": "INT", + "l_linenumber": "INT", + "l_quantity": "INT", + "l_extendedprice": "DOUBLE", + "l_discount": "DOUBLE", + "l_tax": "DOUBLE", 
+ "l_returnflag": "TEXT", + "l_linestatus": "TEXT", + "l_shipdate": "TEXT", + "l_commitdate": "TEXT", + "l_receiptdate": "TEXT", + "l_shipinstruct": "TEXT", + "l_shipmode": "TEXT", + "l_comment": "TEXT" + } + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-graphql-sample/.gitignore b/docs/3.12/scalardb-samples/scalardb-graphql-sample/.gitignore new file mode 100644 index 00000000..7a59fe1f --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-graphql-sample/.gitignore @@ -0,0 +1,3 @@ +/node_modules +/dist +/*.log diff --git a/docs/3.12/scalardb-samples/scalardb-graphql-sample/.prettierrc b/docs/3.12/scalardb-samples/scalardb-graphql-sample/.prettierrc new file mode 100644 index 00000000..0dbf448a --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-graphql-sample/.prettierrc @@ -0,0 +1,5 @@ +{ + "trailingComma": "es5", + "semi": true, + "singleQuote": true +} diff --git a/docs/3.12/scalardb-samples/scalardb-graphql-sample/README.md b/docs/3.12/scalardb-samples/scalardb-graphql-sample/README.md new file mode 100644 index 00000000..15753d55 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-graphql-sample/README.md @@ -0,0 +1,230 @@ +# Create a Sample Application That Uses ScalarDB GraphQL + +{% capture notice--info %} +**Note** + +ScalarDB GraphQL Server is now deprecated, and consequently, this sample code is also deprecated. To use the ScalarDB GraphQL interface, you need to use ScalarDB Cluster, which is available only in the Enterprise edition. For more information, see [ScalarDB Cluster](https://scalardb.scalar-labs.com/docs/latest/scalardb-cluster/). +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +This tutorial describes how to create a sample electronic money application that uses the ScalarDB GraphQL interface. + +## Overview + +In the sample, you will use Cassandra as the database, and the Cassandra server and the ScalarDB GraphQL Server will run in Docker. Then, you will build a Node.js client application that will communicate with the ScalarDB GraphQL Server. + +{% capture notice--info %} +**Note** + +The sample client application is just one example of what you can create. Since GraphQL is a communication pattern, many tools in various programming languages exist for building applications. +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +### What you can do in this sample application + +The sample application supports the following types of transactions: + +- Credit an amount to a user. +- Send an amount from one user to another user. +- Show a user's balance. + +## Prerequisites + +- [Docker](https://www.docker.com/get-started/) 20.10 or later with [Docker Compose](https://docs.docker.com/compose/install/) V2 or later +- [Node.js](https://nodejs.org/en/download/current/) 16+ + +In addition, you need access to the Docker image for ScalarDB GraphQL in the GitHub Container registry, which is private. The registry is available only to those who are using ScalarDB Enterprise. If you need a license for ScalarDB Enterprise, please [contact us](https://scalar-labs.com/contact_us/). + +After confirming that you have access to the ScalarDB GraphQL repository and its packages in the GitHub Container registry, you will need to set your GitHub username and your personal access token. To specify these properties as environment variables, run the following commands, replacing `` with your GitHub username and `` with your personal access token: + +```console +$ export USERNAME= +$ export TOKEN= +``` + +## Set up ScalarDB GraphQL + +The following sections describe how to set up the sample electronic money application. + +### Clone the ScalarDB samples repository + +Open **Terminal**, then clone the ScalarDB samples repository by running the following command: + +```console +$ git clone https://github.com/scalar-labs/scalardb-samples +``` + +Then, go to the directory that contains the sample application by running the following command: + +```console +$ cd scalardb-samples/scalardb-graphql-sample +``` + +### Start Cassandra + +To start the Cassandra server, which is included in the Docker container for the sample application, make sure Docker is running and then run the following command: + +```console +$ docker-compose up -d cassandra +``` + +{% capture notice--info %} +**Note** + +Starting the Docker container may take more than one minute depending on your development environment. + +To check the logs, run the following command: + +```console +$ docker-compose logs -f +``` +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +### Load the schema + +The database schema (the method in which the data will be organized) for the sample application has already been defined in [`emoney-schema.json`](emoney-schema.json). + +To apply the schema, go to the [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases) page and download the ScalarDB Schema Loader that matches the version of ScalarDB that you want to use to the `scalardb-samples/scalardb-graphql-sample` folder. + +Then, run the following command, replacing `` with the version of the ScalarDB Schema Loader that you downloaded: + +```console +$ java -jar scalardb-schema-loader-.jar --config database.properties --schema-file emoney-schema.json --coordinator +``` + +### Start the ScalarDB GraphQL Server + +Before starting the ScalarDB GraphQL Server, log in to the GitHub Container registry by running the following command and entering your GitHub credentials as instructed: + +```console +$ docker login ghcr.io +``` + +To start the ScalarDB GraphQL Server, which is included in the Docker container for the sample application, make sure Docker is running and then run the following command: + +```console +$ docker-compose up -d +``` + +{% capture notice--info %} +**Note** + +- Starting the Docker container may take more than one minute depending on your development environment. +- Running this command will start all services except for `schema-loader`, which has a different profile. In addition, since the `cassandra` service is already running, the command will start only the `scalardb-graphql` service. +- The next time you start these services, you will not need to load the schema. Instead, you can start the service by running only the command above. +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
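With the server running, you can reach the GraphQL endpoint from code as well as from a browser. The following is a minimal sketch that is not part of the sample repository; it assumes the `emoney` schema has been loaded as in the previous step, that the endpoint is at `http://localhost:8080/graphql`, and it mirrors the `GetAccount` operation that appears later in `graphql/account.graphql`. It uses `graphql-request`, the same library the client application uses. Because no accounts exist yet, the query is expected to return `null` for the account; the point is only to confirm that the server answers.

```typescript
// check-endpoint.ts -- optional sketch (hypothetical file) for verifying that the
// ScalarDB GraphQL Server responds to requests.
import { GraphQLClient, gql } from 'graphql-request';

const client = new GraphQLClient('http://localhost:8080/graphql');

// Mirrors the GetAccount operation in graphql/account.graphql: the @transaction
// directive starts a ScalarDB transaction and commits it immediately.
const query = gql`
  query CheckAccount($id: String!, $txId: String, $commit: Boolean)
  @transaction(id: $txId, commit: $commit) {
    account_get(get: { key: { id: $id } }) {
      account {
        id
        balance
      }
    }
  }
`;

client
  .rawRequest(query, { id: 'user1', commit: true })
  .then(({ status, data }) => console.log(status, JSON.stringify(data)))
  .catch(console.error);
```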
+ +After the Docker container has started, open the GraphQL endpoint, http://localhost:8080/graphql, in your web browser. The GraphiQL IDE lets you use and browse the GraphQL schema for the `account` table. + +## Build the client application + +With the ScalarDB GraphQL Server running, you can now build the client application that will communicate with the GraphQL endpoint. + +To download and install the dependencies for the client application, run the following command: + +```console +$ npm install +``` + +The application source code is located in the `src` directory. To compile the source files, run the following command: + +```console +$ npx tsc +``` + +After running this command, the compiled JavaScript files should be in a new directory titled `dist`. + +## Execute transactions and retrieve data in the sample application + +The following sections describe how to execute transactions and retrieve data in the sample electronic money application. + +### Create accounts with a balance + +You need an account with a balance so that you can send funds between accounts. + +To create an account for `user1` that has a balance of `10`, run the following command: + +```console +$ node dist/emoney.js charge user1 10 +``` + +To create an account for `user2` that has a balance of `20`, run the following command: + +```console +$ node dist/emoney.js charge user2 20 +``` + +### Send electronic money between two accounts + +Now that you have created two accounts, you can send funds from one account to the other account. + +To have `user1` pay `5` to `user2`, run the following command: + +```console +$ node dist/emoney.js pay user1 user2 5 +``` + +### Get an account balance + +After sending funds from one account to the other, you can check the balance of each account. + +To get the balance of `user1`, run the following command: + +```console +$ node dist/emoney.js show user1 +``` + +To get the balance of `user2`, run the following command: + +```console +$ node dist/emoney.js show user2 +``` + +### Stop the sample application + +To stop the sample application, stop the Docker container by running the following command: + +```console +$ docker-compose down +``` + +## Reference - Application structure + +This section describes how this client application is configured in detail. + +### Files and directories + +The following list is a description of the key files and directories in this application: + +- `graphql`. The GraphQL operations (queries and mutations) for the application functions are located in this directory. The GraphQL Code Generator (`graphql-codegen`) reads these operations, together with the schema from the GraphQL endpoint, to generate the client TypeScript code. For more details, see [GraphQL Code Generator](#graphql-code-generator). +- `src`. All TypeScript code is located in this directory. + - `src/generated/graphql.ts`. This is generated by using `graphql-codegen` and is based on the GraphQL endpoint and the local operations in the `graphql` directory. + - `src/emoney.ts`. This is the main file of the client application. +- `codegen.yml`. This is the configuration file for `graphql-codegen`. + +### GraphQL Code Generator + +The code for this client application was generated by using [GraphQL Code Generator](https://www.graphql-code-generator.com/), and the application was configured by using the `codegen.yml` file, which enables the following three modules in the `node_modules` directory to generate `src/generated/graphql.ts`: + +- `typescript`. As an essential module, it generates the TypeScript types based on the GraphQL schema.
+- `typescript-operations`. Generates TypeScript types based on the operations in the `graphql` directory. +- `typescript-graphql-request`. Generates the client code as an SDK. With the functions that the SDK provides, you can call the GraphQL operations that are defined in the `graphql` directory. + +{% capture notice--info %} +**Note** + +- The `rawRequest: true` configuration specified in `codegen.yml` is an option of the `typescript-graphql-request` module. This configuration is necessary because you need to access the `extensions` key in the GraphQL response when the server returns the transaction ID. +- To re-generate the code after changing a configuration, run the following command while the ScalarDB GraphQL Server is running in Docker: + +```console +$ npm run generate +``` +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
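To make the transaction flow concrete, here is a condensed sketch of the charge logic from `src/emoney.ts`, rewritten as a standalone script. It is not part of the repository; it assumes the SDK has already been generated with `npm run generate`, that it is imported from the same relative path as in `src/emoney.ts`, and that the server is running at `http://localhost:8080/graphql`.

```typescript
// charge-sketch.ts -- condensed, hypothetical version of the charge flow in src/emoney.ts.
import { GraphQLClient } from 'graphql-request';
import { getSdk } from './generated/graphql';

const sdk = getSdk(new GraphQLClient('http://localhost:8080/graphql'));

async function charge(id: string, amount: number): Promise<void> {
  // 1. Read the current balance without committing. The server starts a transaction
  //    and returns its ID under the `extensions` key of the raw response.
  const { data, errors, extensions } = await sdk.GetAccount({ id });
  if (errors) {
    throw new Error(JSON.stringify(errors));
  }

  const balance = (data?.get1?.account?.balance ?? 0) + amount;

  // 2. Write the new balance in the same transaction by passing the transaction ID
  //    back through the @transaction directive, and commit it.
  const put = await sdk.PutAccount({
    id,
    balance,
    txId: extensions.transaction.id,
    commit: true,
  });
  if (put.errors) {
    throw new Error(JSON.stringify(put.errors));
  }
}

charge('user1', 10).catch(console.error);
```

The `pay` command in `src/emoney.ts` follows the same pattern: it reads two accounts in one transaction and then updates both balances before committing.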
diff --git a/docs/3.12/scalardb-samples/scalardb-graphql-sample/codegen.yml b/docs/3.12/scalardb-samples/scalardb-graphql-sample/codegen.yml new file mode 100644 index 00000000..8497f290 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-graphql-sample/codegen.yml @@ -0,0 +1,11 @@ +overwrite: true +schema: "http://localhost:8080/graphql" +documents: './graphql/*.graphql' +generates: + src/generated/graphql.ts: + plugins: + - typescript + - typescript-operations + - typescript-graphql-request + config: + rawRequest: true diff --git a/docs/3.12/scalardb-samples/scalardb-graphql-sample/database.properties b/docs/3.12/scalardb-samples/scalardb-graphql-sample/database.properties new file mode 100644 index 00000000..5f9ad28e --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-graphql-sample/database.properties @@ -0,0 +1,4 @@ +scalar.db.contact_points=localhost +scalar.db.username=cassandra +scalar.db.password=cassandra +scalar.db.storage=cassandra diff --git a/docs/3.12/scalardb-samples/scalardb-graphql-sample/docker-compose.yml b/docs/3.12/scalardb-samples/scalardb-graphql-sample/docker-compose.yml new file mode 100644 index 00000000..528c6e13 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-graphql-sample/docker-compose.yml @@ -0,0 +1,29 @@ +version: "3.9" +services: + cassandra: + image: cassandra:3.11 + container_name: "cassandradb-1" + ports: + - "9042:9042" + networks: + - scalar-network + + scalardb-graphql: + image: ghcr.io/scalar-labs/scalardb-graphql:3.9.0 + container_name: "scalardb-graphql-1" + depends_on: + - cassandra + restart: "always" + volumes: + - ./graphql-server-database.properties:/scalardb-graphql/database.properties + - ./wait-for-it.sh:/scalardb-graphql/wait-for-it.sh + ports: + - "8080:8080" # scalardb-graphql-server port + entrypoint: /bin/bash + command: ./wait-for-it.sh -t 60 cassandra:9042 -- ./bin/scalardb-graphql-server --config database.properties + networks: + - scalar-network + +networks: + scalar-network: + name: scalar-network diff --git a/docs/3.12/scalardb-samples/scalardb-graphql-sample/emoney-schema.json b/docs/3.12/scalardb-samples/scalardb-graphql-sample/emoney-schema.json new file mode 100644 index 00000000..95a8b99b --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-graphql-sample/emoney-schema.json @@ -0,0 +1,13 @@ +{ + "emoney.account": { + "transaction": true, + "partition-key": [ + "id" + ], + "clustering-key": [], + "columns": { + "id": "TEXT", + "balance": "INT" + } + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-graphql-sample/graphql-server-database.properties b/docs/3.12/scalardb-samples/scalardb-graphql-sample/graphql-server-database.properties new file mode 100644 index 00000000..dcec55da --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-graphql-sample/graphql-server-database.properties @@ -0,0 +1,9 @@ +scalar.db.contact_points=cassandra +scalar.db.username=cassandra +scalar.db.password=cassandra +scalar.db.storage=cassandra + +scalar.db.graphql.port=8080 +scalar.db.graphql.path=/graphql +scalar.db.graphql.namespaces=emoney +scalar.db.graphql.graphiql=true diff --git a/docs/3.12/scalardb-samples/scalardb-graphql-sample/graphql/account.graphql b/docs/3.12/scalardb-samples/scalardb-graphql-sample/graphql/account.graphql new file mode 100644 index 00000000..a200c264 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-graphql-sample/graphql/account.graphql @@ -0,0 +1,24 @@ +fragment accountFields on account_GetPayload { + account { + id + balance + } +} + +query GetAccount($id: String!, $txId: String, 
$commit: Boolean) +@transaction(id: $txId, commit: $commit) { + get1: account_get(get: { key: { id: $id } }) { + ...accountFields + } +} + +mutation PutAccount( + $id: String! + $balance: Int + $txId: String + $commit: Boolean +) @transaction(id: $txId, commit: $commit) { + put1: account_put( + put: { key: { id: $id }, values: { balance: $balance } } + ) +} diff --git a/docs/3.12/scalardb-samples/scalardb-graphql-sample/graphql/account_pay.graphql b/docs/3.12/scalardb-samples/scalardb-graphql-sample/graphql/account_pay.graphql new file mode 100644 index 00000000..ec924874 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-graphql-sample/graphql/account_pay.graphql @@ -0,0 +1,21 @@ +query GetTwoAccounts($id1: String!, $id2: String!, $txId: String, $commit: Boolean) +@transaction(id: $txId, commit: $commit) { + get1: account_get(get: { key: { id: $id1 } }) { + ...accountFields + } + get2: account_get(get: { key: { id: $id2 } }) { + ...accountFields + } +} + +mutation PutTwoAccounts( + $id1: String! + $balance1: Int + $id2: String! + $balance2: Int + $txId: String + $commit: Boolean +) @transaction(id: $txId, commit: $commit) { + put1: account_put(put: { key: { id: $id1 }, values: { balance: $balance1 } }) + put2: account_put(put: { key: { id: $id2 }, values: { balance: $balance2 } }) +} diff --git a/docs/3.12/scalardb-samples/scalardb-graphql-sample/package.json b/docs/3.12/scalardb-samples/scalardb-graphql-sample/package.json new file mode 100644 index 00000000..ca27c997 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-graphql-sample/package.json @@ -0,0 +1,17 @@ +{ + "dependencies": { + "graphql": "^16.3.0", + "yargs": "^17.3.1" + }, + "devDependencies": { + "@graphql-codegen/cli": "2.6.2", + "@graphql-codegen/typescript": "^2.4.7", + "@graphql-codegen/typescript-graphql-request": "^4.4.2", + "@graphql-codegen/typescript-operations": "^2.3.4", + "@types/yargs": "^17.0.9", + "typescript": "^4.6.2" + }, + "scripts": { + "generate": "graphql-codegen --config codegen.yml" + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-graphql-sample/src/emoney.ts b/docs/3.12/scalardb-samples/scalardb-graphql-sample/src/emoney.ts new file mode 100644 index 00000000..1f8ed7a8 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-graphql-sample/src/emoney.ts @@ -0,0 +1,146 @@ +import yargs from 'yargs/yargs'; +import { hideBin } from 'yargs/helpers'; +import { GraphQLClient } from 'graphql-request'; +import { GraphQLError } from 'graphql-request/dist/types'; +import { + getSdk, + GetAccountQuery, + PutAccountMutation, + GetTwoAccountsQuery, + PutTwoAccountsMutation, +} from './generated/graphql'; + +const client = new GraphQLClient('http://localhost:8080/graphql'); +const sdk = getSdk(client); + +yargs(hideBin(process.argv)) + .command('show ', 'Show the account of ', {}, (argv) => { + show(argv.user as string); + }) + .command( + 'charge ', + 'Charge the to the account for ', + {}, + (argv) => { + console.info(`Charging the amount ${argv.amount} to ${argv.user}`); + charge(argv.user as string, argv.amount as number); + } + ) + .command( + 'pay ', + 'Pay the from to ', + {}, + (argv) => { + console.info( + `Paying the amount ${argv.amount} from ${argv.from_user} to ${argv.to_user}` + ); + pay( + argv.from_user as string, + argv.to_user as string, + argv.amount as number + ); + } + ) + .demandCommand(1) + .help() + .parse(); + +async function show(id: string): Promise { + // Retrieve the current balance for id + const { data, errors } = await sdk.GetAccount({ id, commit: true }); + if 
(errors) { + console.error(errors); + throw new Error('An error happened'); + } + const account = data?.get1?.account; + if (account) { + console.info(`Balance for account ${id}: ${account.balance}`); + } else { + console.info(`Account ${id} is not found`); + } +} + +async function charge(id: string, amount: number): Promise { + let dataGet: GetAccountQuery | undefined, + dataPut: PutAccountMutation | undefined, + errors: GraphQLError[] | undefined, + extensions: { transaction: { id: string } }; + + // Retrieve the current balance for id + ({ data: dataGet, errors, extensions } = await sdk.GetAccount({ id })); + + if (errors) { + console.error(errors); + throw new Error('An error happened'); + } + + // Calculate the balance + const balance = (dataGet!.get1!.account?.balance || 0) + amount; + + // Update the balance and commit the transaction + ({ + data: dataPut, + errors, + extensions, + } = await sdk.PutAccount({ + id, + balance, + txId: extensions!.transaction!.id, + commit: true, + })); + + if (errors) { + console.error(errors); + throw new Error('An error happened'); + } +} + +async function pay( + from_id: string, + to_id: string, + amount: number +): Promise { + let dataGet: GetTwoAccountsQuery | undefined, + dataPut: PutTwoAccountsMutation | undefined, + errors: GraphQLError[] | undefined, + extensions: { transaction: { id: string } }; + + // Retrieve the current balances for given ids + ({ + data: dataGet, + errors, + extensions, + } = await sdk.GetTwoAccounts({ + id1: from_id, + id2: to_id, + })); + + if (errors) { + console.error(errors); + throw new Error('An error happened'); + } + + // Calculate the balances + const balance1 = (dataGet!.get1!.account?.balance || 0) - amount; + const balance2 = (dataGet!.get2!.account?.balance || 0) + amount; + + // Update the balances and commit the transaction + const txId = extensions!.transaction!.id; + ({ + data: dataPut, + errors, + extensions, + } = await sdk.PutTwoAccounts({ + id1: from_id, + id2: to_id, + balance1, + balance2, + txId, + commit: true, + })); + + if (errors) { + console.error(errors); + throw new Error('An error happened'); + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-graphql-sample/src/generated/graphql.ts b/docs/3.12/scalardb-samples/scalardb-graphql-sample/src/generated/graphql.ts new file mode 100644 index 00000000..550ed395 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-graphql-sample/src/generated/graphql.ts @@ -0,0 +1,261 @@ +import { GraphQLClient } from 'graphql-request'; +import * as Dom from 'graphql-request/dist/types.dom'; +import { GraphQLError } from 'graphql-request/dist/types'; +import { print } from 'graphql' +import gql from 'graphql-tag'; +export type Maybe = T | null; +export type InputMaybe = Maybe; +export type Exact = { [K in keyof T]: T[K] }; +export type MakeOptional = Omit & { [SubKey in K]?: Maybe }; +export type MakeMaybe = Omit & { [SubKey in K]: Maybe }; +/** All built-in and custom scalars, mapped to their actual values */ +export type Scalars = { + ID: string; + String: string; + Boolean: boolean; + Int: number; + Float: number; + BigInt: any; + Float32: any; +}; + +export type ConditionalExpression = { + bigIntValue?: InputMaybe; + booleanValue?: InputMaybe; + doubleValue?: InputMaybe; + floatValue?: InputMaybe; + intValue?: InputMaybe; + name: Scalars['String']; + operator: ConditionalExpressionOperator; + textValue?: InputMaybe; +}; + +export enum ConditionalExpressionOperator { + Eq = 'EQ', + Gt = 'GT', + Gte = 'GTE', + Lt = 'LT', + Lte = 'LTE', + Ne = 'NE' +} + 
+export enum Consistency { + Eventual = 'EVENTUAL', + Linearizable = 'LINEARIZABLE', + Sequential = 'SEQUENTIAL' +} + +export type DeleteCondition = { + expressions?: InputMaybe>; + type: DeleteConditionType; +}; + +export enum DeleteConditionType { + DeleteIf = 'DeleteIf', + DeleteIfExists = 'DeleteIfExists' +} + +export type Mutation = { + __typename?: 'Mutation'; + abort: Scalars['Boolean']; + account_bulkDelete: Scalars['Boolean']; + account_bulkPut: Scalars['Boolean']; + account_delete: Scalars['Boolean']; + account_mutate: Scalars['Boolean']; + account_put: Scalars['Boolean']; +}; + + +export type MutationAccount_BulkDeleteArgs = { + delete: Array; +}; + + +export type MutationAccount_BulkPutArgs = { + put: Array; +}; + + +export type MutationAccount_DeleteArgs = { + delete: Account_DeleteInput; +}; + + +export type MutationAccount_MutateArgs = { + delete?: InputMaybe>; + put?: InputMaybe>; +}; + + +export type MutationAccount_PutArgs = { + put: Account_PutInput; +}; + +export enum Order { + Asc = 'ASC', + Desc = 'DESC' +} + +export type PutCondition = { + expressions?: InputMaybe>; + type: PutConditionType; +}; + +export enum PutConditionType { + PutIf = 'PutIf', + PutIfExists = 'PutIfExists', + PutIfNotExists = 'PutIfNotExists' +} + +export type Query = { + __typename?: 'Query'; + account_get?: Maybe; +}; + + +export type QueryAccount_GetArgs = { + get: Account_GetInput; +}; + +export type Account = { + __typename?: 'account'; + balance?: Maybe; + id?: Maybe; +}; + +export type Account_DeleteInput = { + condition?: InputMaybe; + consistency?: InputMaybe; + key: Account_Key; +}; + +export type Account_GetInput = { + consistency?: InputMaybe; + key: Account_Key; +}; + +export type Account_GetPayload = { + __typename?: 'account_GetPayload'; + account?: Maybe; +}; + +export type Account_Key = { + id: Scalars['String']; +}; + +export type Account_PutInput = { + condition?: InputMaybe; + consistency?: InputMaybe; + key: Account_Key; + values: Account_PutValues; +}; + +export type Account_PutValues = { + balance?: InputMaybe; +}; + +export type AccountFieldsFragment = { __typename?: 'account_GetPayload', account?: { __typename?: 'account', id?: string | null, balance?: number | null } | null }; + +export type GetAccountQueryVariables = Exact<{ + id: Scalars['String']; + txId?: InputMaybe; + commit?: InputMaybe; +}>; + + +export type GetAccountQuery = { __typename?: 'Query', get1?: { __typename?: 'account_GetPayload', account?: { __typename?: 'account', id?: string | null, balance?: number | null } | null } | null }; + +export type PutAccountMutationVariables = Exact<{ + id: Scalars['String']; + balance?: InputMaybe; + txId?: InputMaybe; + commit?: InputMaybe; +}>; + + +export type PutAccountMutation = { __typename?: 'Mutation', put1: boolean }; + +export type GetTwoAccountsQueryVariables = Exact<{ + id1: Scalars['String']; + id2: Scalars['String']; + txId?: InputMaybe; + commit?: InputMaybe; +}>; + + +export type GetTwoAccountsQuery = { __typename?: 'Query', get1?: { __typename?: 'account_GetPayload', account?: { __typename?: 'account', id?: string | null, balance?: number | null } | null } | null, get2?: { __typename?: 'account_GetPayload', account?: { __typename?: 'account', id?: string | null, balance?: number | null } | null } | null }; + +export type PutTwoAccountsMutationVariables = Exact<{ + id1: Scalars['String']; + balance1?: InputMaybe; + id2: Scalars['String']; + balance2?: InputMaybe; + txId?: InputMaybe; + commit?: InputMaybe; +}>; + + +export type PutTwoAccountsMutation = { 
__typename?: 'Mutation', put1: boolean, put2: boolean }; + +export const AccountFieldsFragmentDoc = gql` + fragment accountFields on account_GetPayload { + account { + id + balance + } +} + `; +export const GetAccountDocument = gql` + query GetAccount($id: String!, $txId: String, $commit: Boolean) @transaction(id: $txId, commit: $commit) { + get1: account_get(get: {key: {id: $id}}) { + ...accountFields + } +} + ${AccountFieldsFragmentDoc}`; +export const PutAccountDocument = gql` + mutation PutAccount($id: String!, $balance: Int, $txId: String, $commit: Boolean) @transaction(id: $txId, commit: $commit) { + put1: account_put(put: {key: {id: $id}, values: {balance: $balance}}) +} + `; +export const GetTwoAccountsDocument = gql` + query GetTwoAccounts($id1: String!, $id2: String!, $txId: String, $commit: Boolean) @transaction(id: $txId, commit: $commit) { + get1: account_get(get: {key: {id: $id1}}) { + ...accountFields + } + get2: account_get(get: {key: {id: $id2}}) { + ...accountFields + } +} + ${AccountFieldsFragmentDoc}`; +export const PutTwoAccountsDocument = gql` + mutation PutTwoAccounts($id1: String!, $balance1: Int, $id2: String!, $balance2: Int, $txId: String, $commit: Boolean) @transaction(id: $txId, commit: $commit) { + put1: account_put(put: {key: {id: $id1}, values: {balance: $balance1}}) + put2: account_put(put: {key: {id: $id2}, values: {balance: $balance2}}) +} + `; + +export type SdkFunctionWrapper = (action: (requestHeaders?:Record) => Promise, operationName: string, operationType?: string) => Promise; + + +const defaultWrapper: SdkFunctionWrapper = (action, _operationName, _operationType) => action(); +const GetAccountDocumentString = print(GetAccountDocument); +const PutAccountDocumentString = print(PutAccountDocument); +const GetTwoAccountsDocumentString = print(GetTwoAccountsDocument); +const PutTwoAccountsDocumentString = print(PutTwoAccountsDocument); +export function getSdk(client: GraphQLClient, withWrapper: SdkFunctionWrapper = defaultWrapper) { + return { + GetAccount(variables: GetAccountQueryVariables, requestHeaders?: Dom.RequestInit["headers"]): Promise<{ data?: GetAccountQuery | undefined; extensions?: any; headers: Dom.Headers; status: number; errors?: GraphQLError[] | undefined; }> { + return withWrapper((wrappedRequestHeaders) => client.rawRequest(GetAccountDocumentString, variables, {...requestHeaders, ...wrappedRequestHeaders}), 'GetAccount', 'query'); + }, + PutAccount(variables: PutAccountMutationVariables, requestHeaders?: Dom.RequestInit["headers"]): Promise<{ data?: PutAccountMutation | undefined; extensions?: any; headers: Dom.Headers; status: number; errors?: GraphQLError[] | undefined; }> { + return withWrapper((wrappedRequestHeaders) => client.rawRequest(PutAccountDocumentString, variables, {...requestHeaders, ...wrappedRequestHeaders}), 'PutAccount', 'mutation'); + }, + GetTwoAccounts(variables: GetTwoAccountsQueryVariables, requestHeaders?: Dom.RequestInit["headers"]): Promise<{ data?: GetTwoAccountsQuery | undefined; extensions?: any; headers: Dom.Headers; status: number; errors?: GraphQLError[] | undefined; }> { + return withWrapper((wrappedRequestHeaders) => client.rawRequest(GetTwoAccountsDocumentString, variables, {...requestHeaders, ...wrappedRequestHeaders}), 'GetTwoAccounts', 'query'); + }, + PutTwoAccounts(variables: PutTwoAccountsMutationVariables, requestHeaders?: Dom.RequestInit["headers"]): Promise<{ data?: PutTwoAccountsMutation | undefined; extensions?: any; headers: Dom.Headers; status: number; errors?: GraphQLError[] | 
undefined; }> { + return withWrapper((wrappedRequestHeaders) => client.rawRequest(PutTwoAccountsDocumentString, variables, {...requestHeaders, ...wrappedRequestHeaders}), 'PutTwoAccounts', 'mutation'); + } + }; +} +export type Sdk = ReturnType; \ No newline at end of file diff --git a/docs/3.12/scalardb-samples/scalardb-graphql-sample/tsconfig.json b/docs/3.12/scalardb-samples/scalardb-graphql-sample/tsconfig.json new file mode 100644 index 00000000..93ced35a --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-graphql-sample/tsconfig.json @@ -0,0 +1,102 @@ +{ + "compilerOptions": { + /* Visit https://aka.ms/tsconfig.json to read more about this file */ + + /* Projects */ + // "incremental": true, /* Enable incremental compilation */ + // "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */ + // "tsBuildInfoFile": "./", /* Specify the folder for .tsbuildinfo incremental compilation files. */ + // "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects */ + // "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */ + // "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */ + + /* Language and Environment */ + "target": "es2016", /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */ + // "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */ + // "jsx": "preserve", /* Specify what JSX code is generated. */ + // "experimentalDecorators": true, /* Enable experimental support for TC39 stage 2 draft decorators. */ + // "emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */ + // "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h' */ + // "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */ + // "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using `jsx: react-jsx*`.` */ + // "reactNamespace": "", /* Specify the object invoked for `createElement`. This only applies when targeting `react` JSX emit. */ + // "noLib": true, /* Disable including any library files, including the default lib.d.ts. */ + // "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */ + + /* Modules */ + "module": "commonjs", /* Specify what module code is generated. */ + // "rootDir": "./", /* Specify the root folder within your source files. */ + // "moduleResolution": "node", /* Specify how TypeScript looks up a file from a given module specifier. */ + // "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */ + // "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */ + // "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */ + // "typeRoots": [], /* Specify multiple folders that act like `./node_modules/@types`. */ + // "types": [], /* Specify type package names to be included without being referenced in a source file. */ + // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. 
*/ + // "resolveJsonModule": true, /* Enable importing .json files */ + // "noResolve": true, /* Disallow `import`s, `require`s or ``s from expanding the number of files TypeScript should add to a project. */ + + /* JavaScript Support */ + // "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the `checkJS` option to get errors from these files. */ + // "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */ + // "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from `node_modules`. Only applicable with `allowJs`. */ + + /* Emit */ + // "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */ + // "declarationMap": true, /* Create sourcemaps for d.ts files. */ + // "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */ + // "sourceMap": true, /* Create source map files for emitted JavaScript files. */ + // "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If `declaration` is true, also designates a file that bundles all .d.ts output. */ + "outDir": "./dist", /* Specify an output folder for all emitted files. */ + // "removeComments": true, /* Disable emitting comments. */ + // "noEmit": true, /* Disable emitting files from a compilation. */ + // "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */ + // "importsNotUsedAsValues": "remove", /* Specify emit/checking behavior for imports that are only used for types */ + // "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */ + // "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */ + // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */ + // "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */ + // "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */ + // "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */ + // "newLine": "crlf", /* Set the newline character for emitting files. */ + // "stripInternal": true, /* Disable emitting declarations that have `@internal` in their JSDoc comments. */ + // "noEmitHelpers": true, /* Disable generating custom helper functions like `__extends` in compiled output. */ + // "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */ + // "preserveConstEnums": true, /* Disable erasing `const enum` declarations in generated code. */ + // "declarationDir": "./", /* Specify the output directory for generated declaration files. */ + // "preserveValueImports": true, /* Preserve unused imported values in the JavaScript output that would otherwise be removed. */ + + /* Interop Constraints */ + // "isolatedModules": true, /* Ensure that each file can be safely transpiled without relying on other imports. */ + // "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */ + "esModuleInterop": true, /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables `allowSyntheticDefaultImports` for type compatibility. */ + // "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. 
*/ + "forceConsistentCasingInFileNames": true, /* Ensure that casing is correct in imports. */ + + /* Type Checking */ + "strict": true, /* Enable all strict type-checking options. */ + // "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied `any` type.. */ + // "strictNullChecks": true, /* When type checking, take into account `null` and `undefined`. */ + // "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */ + // "strictBindCallApply": true, /* Check that the arguments for `bind`, `call`, and `apply` methods match the original function. */ + // "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */ + // "noImplicitThis": true, /* Enable error reporting when `this` is given the type `any`. */ + // "useUnknownInCatchVariables": true, /* Type catch clause variables as 'unknown' instead of 'any'. */ + // "alwaysStrict": true, /* Ensure 'use strict' is always emitted. */ + // "noUnusedLocals": true, /* Enable error reporting when a local variables aren't read. */ + // "noUnusedParameters": true, /* Raise an error when a function parameter isn't read */ + // "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */ + // "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */ + // "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */ + // "noUncheckedIndexedAccess": true, /* Include 'undefined' in index signature results */ + // "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */ + // "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type */ + // "allowUnusedLabels": true, /* Disable error reporting for unused labels. */ + // "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */ + + /* Completeness */ + // "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */ + "skipLibCheck": true /* Skip type checking all .d.ts files. 
*/ + }, + "include": ["src/**/*"] +} diff --git a/docs/3.12/scalardb-samples/scalardb-graphql-sample/wait-for-it.sh b/docs/3.12/scalardb-samples/scalardb-graphql-sample/wait-for-it.sh new file mode 100755 index 00000000..3974640b --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-graphql-sample/wait-for-it.sh @@ -0,0 +1,182 @@ +#!/usr/bin/env bash +# Use this script to test if a given TCP host/port are available + +WAITFORIT_cmdname=${0##*/} + +echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi } + +usage() +{ + cat << USAGE >&2 +Usage: + $WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args] + -h HOST | --host=HOST Host or IP under test + -p PORT | --port=PORT TCP port under test + Alternatively, you specify the host and port as host:port + -s | --strict Only execute subcommand if the test succeeds + -q | --quiet Don't output any status messages + -t TIMEOUT | --timeout=TIMEOUT + Timeout in seconds, zero for no timeout + -- COMMAND ARGS Execute command with args after the test finishes +USAGE + exit 1 +} + +wait_for() +{ + if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then + echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" + else + echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout" + fi + WAITFORIT_start_ts=$(date +%s) + while : + do + if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then + nc -z $WAITFORIT_HOST $WAITFORIT_PORT + WAITFORIT_result=$? + else + (echo -n > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1 + WAITFORIT_result=$? + fi + if [[ $WAITFORIT_result -eq 0 ]]; then + WAITFORIT_end_ts=$(date +%s) + echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds" + break + fi + sleep 1 + done + return $WAITFORIT_result +} + +wait_for_wrapper() +{ + # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692 + if [[ $WAITFORIT_QUIET -eq 1 ]]; then + timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & + else + timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & + fi + WAITFORIT_PID=$! + trap "kill -INT -$WAITFORIT_PID" INT + wait $WAITFORIT_PID + WAITFORIT_RESULT=$? 
+ if [[ $WAITFORIT_RESULT -ne 0 ]]; then + echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" + fi + return $WAITFORIT_RESULT +} + +# process arguments +while [[ $# -gt 0 ]] +do + case "$1" in + *:* ) + WAITFORIT_hostport=(${1//:/ }) + WAITFORIT_HOST=${WAITFORIT_hostport[0]} + WAITFORIT_PORT=${WAITFORIT_hostport[1]} + shift 1 + ;; + --child) + WAITFORIT_CHILD=1 + shift 1 + ;; + -q | --quiet) + WAITFORIT_QUIET=1 + shift 1 + ;; + -s | --strict) + WAITFORIT_STRICT=1 + shift 1 + ;; + -h) + WAITFORIT_HOST="$2" + if [[ $WAITFORIT_HOST == "" ]]; then break; fi + shift 2 + ;; + --host=*) + WAITFORIT_HOST="${1#*=}" + shift 1 + ;; + -p) + WAITFORIT_PORT="$2" + if [[ $WAITFORIT_PORT == "" ]]; then break; fi + shift 2 + ;; + --port=*) + WAITFORIT_PORT="${1#*=}" + shift 1 + ;; + -t) + WAITFORIT_TIMEOUT="$2" + if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi + shift 2 + ;; + --timeout=*) + WAITFORIT_TIMEOUT="${1#*=}" + shift 1 + ;; + --) + shift + WAITFORIT_CLI=("$@") + break + ;; + --help) + usage + ;; + *) + echoerr "Unknown argument: $1" + usage + ;; + esac +done + +if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then + echoerr "Error: you need to provide a host and port to test." + usage +fi + +WAITFORIT_TIMEOUT=${WAITFORIT_TIMEOUT:-15} +WAITFORIT_STRICT=${WAITFORIT_STRICT:-0} +WAITFORIT_CHILD=${WAITFORIT_CHILD:-0} +WAITFORIT_QUIET=${WAITFORIT_QUIET:-0} + +# Check to see if timeout is from busybox? +WAITFORIT_TIMEOUT_PATH=$(type -p timeout) +WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlink -f $WAITFORIT_TIMEOUT_PATH) + +WAITFORIT_BUSYTIMEFLAG="" +if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then + WAITFORIT_ISBUSY=1 + # Check if busybox timeout uses -t flag + # (recent Alpine versions don't support -t anymore) + if timeout &>/dev/stdout | grep -q -e '-t '; then + WAITFORIT_BUSYTIMEFLAG="-t" + fi +else + WAITFORIT_ISBUSY=0 +fi + +if [[ $WAITFORIT_CHILD -gt 0 ]]; then + wait_for + WAITFORIT_RESULT=$? + exit $WAITFORIT_RESULT +else + if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then + wait_for_wrapper + WAITFORIT_RESULT=$? + else + wait_for + WAITFORIT_RESULT=$? + fi +fi + +if [[ $WAITFORIT_CLI != "" ]]; then + if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then + echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess" + exit $WAITFORIT_RESULT + fi + exec "${WAITFORIT_CLI[@]}" +else + exit $WAITFORIT_RESULT +fi \ No newline at end of file diff --git a/docs/3.12/scalardb-samples/scalardb-sample/README.md b/docs/3.12/scalardb-samples/scalardb-sample/README.md new file mode 100644 index 00000000..1b83bd8f --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sample/README.md @@ -0,0 +1,318 @@ +# Create a Sample Application That Uses ScalarDB + +This tutorial describes how to create a sample e-commerce application by using ScalarDB. + +## Overview + +The sample e-commerce application shows how users can order and pay for items by using a line of credit. + +The database that you will be using in the sample application is Cassandra. Although Cassandra does not provide ACID transaction capabilities, you can make transactions ACID compliant by having your application connect to the database through ScalarDB. + +To reference the sample application source code, see [src/main/java/sample/Sample.java](src/main/java/sample/Sample.java). 
+ +{% capture notice--info %} +**Note** + +Since the focus of the sample application is to demonstrate using ScalarDB, application-specific error handling, authentication processing, and similar functions are not included in the sample application. For details about exception handling in ScalarDB, see [Handle exceptions](https://github.com/scalar-labs/scalardb/blob/master/docs/api-guide.md#handle-exceptions). +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +### What you can do in this sample application + +The sample application supports the following types of transactions: + +- Get customer information. +- Place an order by using a line of credit. + - Checks if the cost of the order is below the customer's credit limit. + - If the check passes, records the order history and updates the amount the customer has spent. +- Get order information by order ID. +- Get order information by customer ID. +- Make a payment. + - Reduces the amount the customer has spent. + +## Prerequisites + +- One of the following Java Development Kits (JDKs): + - [Oracle JDK](https://www.oracle.com/java/technologies/downloads/) LTS version (8, 11, or 17) + - [OpenJDK](https://openjdk.org/install/) LTS version (8, 11, or 17) +- [Docker](https://www.docker.com/get-started/) 20.10 or later with [Docker Compose](https://docs.docker.com/compose/install/) V2 or later + +{% capture notice--info %} +**Note** + +We recommend using the LTS versions mentioned above, but other non-LTS versions may work. + +In addition, other JDKs should work with ScalarDB, but we haven't tested them. +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +## Set up ScalarDB + +The following sections describe how to set up the sample e-commerce application. + +### Clone the ScalarDB samples repository + +Open **Terminal**, then clone the ScalarDB samples repository by running the following command: + +```console +$ git clone https://github.com/scalar-labs/scalardb-samples +``` + +Then, go to the directory that contains the sample application by running the following command: + +```console +$ cd scalardb-samples/scalardb-sample +``` + +### Start Cassandra + +Cassandra is already configured for the sample application, as shown in [`database.properties`](database.properties). + +To start Cassandra, which is included in the Docker container for the sample application, make sure Docker is running and then run the following command: + +```console +$ docker-compose up -d +``` + +{% capture notice--info %} +**Note** + +Starting the Docker container may take more than one minute depending on your development environment. +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +### Load the schema + +The database schema (the method in which the data will be organized) for the sample application has already been defined in [`schema.json`](schema.json). + +To apply the schema, go to the [ScalarDB Releases](https://github.com/scalar-labs/scalardb/releases) page and download the ScalarDB Schema Loader that matches the version of ScalarDB that you want to use to the `scalardb-samples/scalardb-sample` folder. + +Then, run the following command, replacing `` with the version of the ScalarDB Schema Loader that you downloaded: + +```console +$ java -jar scalardb-schema-loader-.jar --config database.properties --schema-file schema.json --coordinator +``` + +#### Schema details + +As shown in [`schema.json`](schema.json) for the sample application, all the tables are created in the `sample` namespace. + +- `sample.customers`: a table that manages customer information + - `credit_limit`: the maximum amount of money that the lender will allow the customer to spend from their line of credit + - `credit_total`: the amount of money that the customer has spent from their line of credit +- `sample.orders`: a table that manages order information +- `sample.statements`: a table that manages order statement information +- `sample.items`: a table that manages information for items to be ordered + +The Entity Relationship Diagram for the schema is as follows: + +![ERD](images/ERD.png) + +### Load the initial data + +After the Docker container has started, load the initial data by running the following command: + +```console +$ ./gradlew run --args="LoadInitialData" +``` + +After the initial data has loaded, the following records should be stored in the tables. + +**`sample.customers` table** + +| customer_id | name | credit_limit | credit_total | +|-------------|---------------|--------------|--------------| +| 1 | Yamada Taro | 10000 | 0 | +| 2 | Yamada Hanako | 10000 | 0 | +| 3 | Suzuki Ichiro | 10000 | 0 | + +**`sample.items` table** + +| item_id | name | price | +|---------|--------|-------| +| 1 | Apple | 1000 | +| 2 | Orange | 2000 | +| 3 | Grape | 2500 | +| 4 | Mango | 5000 | +| 5 | Melon | 3000 | + +## Execute transactions and retrieve data in the sample application + +The following sections describe how to execute transactions and retrieve data in the sample e-commerce application. + +### Get customer information + +Start with getting information about the customer whose ID is `1` by running the following command: + +```console +$ ./gradlew run --args="GetCustomerInfo 1" +``` + +You should see the following output: + +```console +... +{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 0} +... +``` + +### Place an order + +Then, have customer ID `1` place an order for three apples and two oranges by running the following command: + +{% capture notice--info %} +**Note** + +The order format in this command is `./gradlew run --args="PlaceOrder :,:,..."`. +{% endcapture %} + +
+<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +```console +$ ./gradlew run --args="PlaceOrder 1 1:3,2:2" +``` + +You should see a similar output as below, with a different UUID for `order_id`, which confirms that the order was successful: + +```console +... +{"order_id": "dea4964a-ff50-4ecf-9201-027981a1566e"} +... +``` + +### Check order details + +Check details about the order by running the following command, replacing `` with the UUID for the `order_id` that was shown after running the previous command: + +```console +$ ./gradlew run --args="GetOrder " +``` + +You should see a similar output as below, with different UUIDs for `order_id` and `timestamp`: + +```console +... +{"order": {"order_id": "dea4964a-ff50-4ecf-9201-027981a1566e","timestamp": 1650948340914,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 1,"item_name": "Apple","price": 1000,"count": 3,"total": 3000},{"item_id": 2,"item_name": "Orange","price": 2000,"count": 2,"total": 4000}],"total": 7000}} +... +``` + +### Place another order + +Place an order for one melon that uses the remaining amount in `credit_total` for customer ID `1` by running the following command: + +```console +$ ./gradlew run --args="PlaceOrder 1 5:1" +``` + +You should see a similar output as below, with a different UUID for `order_id`, which confirms that the order was successful: + +```console +... +{"order_id": "bcc34150-91fa-4bea-83db-d2dbe6f0f30d"} +... +``` + +### Check order history + +Get the history of all orders for customer ID `1` by running the following command: + +```console +$ ./gradlew run --args="GetOrders 1" +``` + +You should see a similar output as below, with different UUIDs for `order_id` and `timestamp`, which shows the history of all orders for customer ID `1` in descending order by timestamp: + +```console +... +{"order": [{"order_id": "dea4964a-ff50-4ecf-9201-027981a1566e","timestamp": 1650948340914,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 1,"item_name": "Apple","price": 1000,"count": 3,"total": 3000},{"item_id": 2,"item_name": "Orange","price": 2000,"count": 2,"total": 4000}],"total": 7000},{"order_id": "bcc34150-91fa-4bea-83db-d2dbe6f0f30d","timestamp": 1650948412766,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 5,"item_name": "Melon","price": 3000,"count": 1,"total": 3000}],"total": 3000}]} +... +``` + +### Check credit total + +Get the credit total for customer ID `1` by running the following command: + +```console +$ ./gradlew run --args="GetCustomerInfo 1" +``` + +You should see the following output, which shows that customer ID `1` has reached their `credit_limit` in `credit_total` and cannot place anymore orders: + +```console +... +{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 10000} +... +``` + +Try to place an order for one grape and one mango by running the following command: + +```console +$ ./gradlew run --args="PlaceOrder 1 3:1,4:1" +``` + +You should see the following output, which shows that the order failed because the `credit_total` amount would exceed the `credit_limit` amount. + +```console +... 
+java.lang.RuntimeException: Credit limit exceeded + at sample.Sample.placeOrder(Sample.java:205) + at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:33) + at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:8) + at picocli.CommandLine.executeUserObject(CommandLine.java:1783) + at picocli.CommandLine.access$900(CommandLine.java:145) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2141) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2108) + at picocli.CommandLine$AbstractParseResultHandler.execute(CommandLine.java:1975) + at picocli.CommandLine.execute(CommandLine.java:1904) + at sample.command.SampleCommand.main(SampleCommand.java:35) +... +``` + +### Make a payment + +To continue making orders, customer ID `1` must make a payment to reduce the `credit_total` amount. + +Make a payment by running the following command: + +```console +$ ./gradlew run --args="Repayment 1 8000" +``` + +Then, check the `credit_total` amount for customer ID `1` by running the following command: + +```console +$ ./gradlew run --args="GetCustomerInfo 1" +``` + +You should see the following output, which shows that a payment was applied to customer ID `1`, reducing the `credit_total` amount: + +```console +... +{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 2000} +... +``` + +Now that customer ID `1` has made a payment, place an order for one grape and one melon by running the following command: + +```console +$ ./gradlew run --args="PlaceOrder 1 3:1,4:1" +``` + +You should see a similar output as below, with a different UUID for `order_id`, which confirms that the order was successful: + +``` +... +{"order_id": "8911cab3-1c2b-4322-9386-adb1c024e078"} +... +``` + +## Stop the sample application + +To stop the sample application, stop the Docker container by running the following command: + +```console +$ docker-compose down +``` diff --git a/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/Sample.class b/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/Sample.class new file mode 100644 index 00000000..6ba9b459 Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/Sample.class differ diff --git a/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/GetCustomerInfoCommand.class b/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/GetCustomerInfoCommand.class new file mode 100644 index 00000000..27292c51 Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/GetCustomerInfoCommand.class differ diff --git a/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/GetOrderCommand.class b/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/GetOrderCommand.class new file mode 100644 index 00000000..e7a4a5f0 Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/GetOrderCommand.class differ diff --git a/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/GetOrdersCommand.class b/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/GetOrdersCommand.class new file mode 100644 index 00000000..b0c26728 Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/GetOrdersCommand.class differ diff --git a/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/LoadInitialDataCommand.class b/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/LoadInitialDataCommand.class new file mode 
100644 index 00000000..e7146eea Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/LoadInitialDataCommand.class differ diff --git a/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/PlaceOrderCommand.class b/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/PlaceOrderCommand.class new file mode 100644 index 00000000..1d82fe15 Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/PlaceOrderCommand.class differ diff --git a/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/RepaymentCommand.class b/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/RepaymentCommand.class new file mode 100644 index 00000000..b2ba7f3c Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/RepaymentCommand.class differ diff --git a/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/SampleCommand.class b/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/SampleCommand.class new file mode 100644 index 00000000..ecc0fcee Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sample/bin/main/sample/command/SampleCommand.class differ diff --git a/docs/3.12/scalardb-samples/scalardb-sample/build.gradle b/docs/3.12/scalardb-samples/scalardb-sample/build.gradle new file mode 100644 index 00000000..e9af1fbb --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sample/build.gradle @@ -0,0 +1,25 @@ +plugins { + id 'java' + id 'application' +} + +group 'org.sample' +version '1.0-SNAPSHOT' + +repositories { + mavenCentral() +} + +dependencies { + implementation 'com.scalar-labs:scalardb:3.9.1' + implementation 'info.picocli:picocli:4.7.1' +} + +application { + mainClassName = 'sample.command.SampleCommand' +} + +archivesBaseName = "sample" + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 diff --git a/docs/3.12/scalardb-samples/scalardb-sample/database.properties b/docs/3.12/scalardb-samples/scalardb-sample/database.properties new file mode 100644 index 00000000..a44993ae --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sample/database.properties @@ -0,0 +1,4 @@ +scalar.db.storage=cassandra +scalar.db.contact_points=localhost +scalar.db.username=cassandra +scalar.db.password=cassandra diff --git a/docs/3.12/scalardb-samples/scalardb-sample/docker-compose.yml b/docs/3.12/scalardb-samples/scalardb-sample/docker-compose.yml new file mode 100644 index 00000000..2eb12b92 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sample/docker-compose.yml @@ -0,0 +1,7 @@ +version: "3.5" +services: + cassandra: + image: cassandra:3.11 + container_name: "cassandra-1" + ports: + - "9042:9042" diff --git a/docs/3.12/scalardb-samples/scalardb-sample/gradle/wrapper/gradle-wrapper.jar b/docs/3.12/scalardb-samples/scalardb-sample/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 00000000..7454180f Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sample/gradle/wrapper/gradle-wrapper.jar differ diff --git a/docs/3.12/scalardb-samples/scalardb-sample/gradle/wrapper/gradle-wrapper.properties b/docs/3.12/scalardb-samples/scalardb-sample/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000..070cb702 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sample/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists 
+distributionUrl=https\://services.gradle.org/distributions/gradle-7.6-bin.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/docs/3.12/scalardb-samples/scalardb-sample/gradlew b/docs/3.12/scalardb-samples/scalardb-sample/gradlew new file mode 100755 index 00000000..744e882e --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sample/gradlew @@ -0,0 +1,185 @@ +#!/usr/bin/env sh + +# +# Copyright 2015 the original author or authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MSYS* | MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin or MSYS, switch paths to Windows format before running java +if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=`expr $i + 1` + done + case $i in + 0) set -- ;; + 1) set -- "$args0" ;; + 2) set -- "$args0" "$args1" ;; + 3) set -- "$args0" "$args1" "$args2" ;; + 4) set -- "$args0" "$args1" "$args2" "$args3" ;; + 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=`save "$@"` + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +exec "$JAVACMD" "$@" diff --git a/docs/3.12/scalardb-samples/scalardb-sample/gradlew.bat b/docs/3.12/scalardb-samples/scalardb-sample/gradlew.bat new file mode 100644 index 00000000..107acd32 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sample/gradlew.bat @@ -0,0 +1,89 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/docs/3.12/scalardb-samples/scalardb-sample/images/ERD.png b/docs/3.12/scalardb-samples/scalardb-sample/images/ERD.png new file mode 100644 index 00000000..1a6d13c5 Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sample/images/ERD.png differ diff --git a/docs/3.12/scalardb-samples/scalardb-sample/schema.json b/docs/3.12/scalardb-samples/scalardb-sample/schema.json new file mode 100644 index 00000000..0f8cc99c --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sample/schema.json @@ -0,0 +1,56 @@ +{ + "sample.customers": { + "transaction": true, + "partition-key": [ + "customer_id" + ], + "columns": { + "customer_id": "INT", + "name": "TEXT", + "credit_limit": "INT", + "credit_total": "INT" + } + }, + "sample.orders": { + "transaction": true, + "partition-key": [ + "customer_id" + ], + "clustering-key": [ + "timestamp" + ], + "secondary-index": [ + "order_id" + ], + "columns": { + "order_id": "TEXT", + "customer_id": "INT", + "timestamp": "BIGINT" + } + }, + "sample.statements": { + "transaction": true, + "partition-key": [ + "order_id" + ], + "clustering-key": [ + "item_id" + ], + "columns": { + "order_id": "TEXT", + "item_id": "INT", + "count": "INT" + } + }, + "sample.items": { + "transaction": true, + "partition-key": [ + "item_id" + ], + "columns": { + "item_id": "INT", + "name": "TEXT", + "price": "INT" + } + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-sample/settings.gradle b/docs/3.12/scalardb-samples/scalardb-sample/settings.gradle new file mode 100644 index 00000000..85fcc25b --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sample/settings.gradle @@ -0,0 +1 @@ +rootProject.name = 'scalardb-sample' diff --git a/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/Sample.java b/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/Sample.java new file mode 100644 index 00000000..f0763433 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/Sample.java @@ -0,0 +1,414 @@ +package sample; + +import com.scalar.db.api.DistributedTransaction; +import com.scalar.db.api.DistributedTransactionManager; +import com.scalar.db.api.Get; +import com.scalar.db.api.Put; +import com.scalar.db.api.Result; +import com.scalar.db.api.Scan; +import com.scalar.db.exception.transaction.TransactionException; +import com.scalar.db.io.Key; +import com.scalar.db.service.TransactionFactory; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.UUID; + +public class Sample implements AutoCloseable { + + private final DistributedTransactionManager manager; + + public Sample() throws IOException { + // Create a transaction manager object + TransactionFactory factory = TransactionFactory.create("database.properties"); + manager = factory.getTransactionManager(); + } + + public void loadInitialData() throws TransactionException { + DistributedTransaction transaction = null; + try { + transaction = manager.start(); + loadCustomerIfNotExists(transaction, 1, "Yamada Taro", 10000, 0); + loadCustomerIfNotExists(transaction, 2, "Yamada Hanako", 10000, 0); + loadCustomerIfNotExists(transaction, 3, "Suzuki Ichiro", 10000, 0); + loadItemIfNotExists(transaction, 1, "Apple", 1000); + loadItemIfNotExists(transaction, 2, "Orange", 2000); + loadItemIfNotExists(transaction, 3, "Grape", 2500); + loadItemIfNotExists(transaction, 4, "Mango", 5000); + 
loadItemIfNotExists(transaction, 5, "Melon", 3000); + transaction.commit(); + } catch (TransactionException e) { + if (transaction != null) { + // If an error occurs, abort the transaction + transaction.abort(); + } + throw e; + } + } + + private void loadCustomerIfNotExists( + DistributedTransaction transaction, + int customerId, + String name, + int creditLimit, + int creditTotal) + throws TransactionException { + Optional customer = + transaction.get( + Get.newBuilder() + .namespace("sample") + .table("customers") + .partitionKey(Key.ofInt("customer_id", customerId)) + .build()); + if (!customer.isPresent()) { + transaction.put( + Put.newBuilder() + .namespace("sample") + .table("customers") + .partitionKey(Key.ofInt("customer_id", customerId)) + .textValue("name", name) + .intValue("credit_limit", creditLimit) + .intValue("credit_total", creditTotal) + .build()); + } + } + + private void loadItemIfNotExists( + DistributedTransaction transaction, int itemId, String name, int price) + throws TransactionException { + Optional item = + transaction.get( + Get.newBuilder() + .namespace("sample") + .table("items") + .partitionKey(Key.ofInt("item_id", itemId)) + .build()); + if (!item.isPresent()) { + transaction.put( + Put.newBuilder() + .namespace("sample") + .table("items") + .partitionKey(Key.ofInt("item_id", itemId)) + .textValue("name", name) + .intValue("price", price) + .build()); + } + } + + public String getCustomerInfo(int customerId) throws TransactionException { + DistributedTransaction transaction = null; + try { + // Start a transaction + transaction = manager.start(); + + // Retrieve the customer info for the specified customer ID from the customers table + Optional customer = + transaction.get( + Get.newBuilder() + .namespace("sample") + .table("customers") + .partitionKey(Key.ofInt("customer_id", customerId)) + .build()); + + if (!customer.isPresent()) { + // If the customer info the specified customer ID doesn't exist, throw an exception + throw new RuntimeException("Customer not found"); + } + + // Commit the transaction (even when the transaction is read-only, we need to commit) + transaction.commit(); + + // Return the customer info as a JSON format + return String.format( + "{\"id\": %d, \"name\": \"%s\", \"credit_limit\": %d, \"credit_total\": %d}", + customerId, + customer.get().getText("name"), + customer.get().getInt("credit_limit"), + customer.get().getInt("credit_total")); + } catch (Exception e) { + if (transaction != null) { + // If an error occurs, abort the transaction + transaction.abort(); + } + throw e; + } + } + + public String placeOrder(int customerId, int[] itemIds, int[] itemCounts) + throws TransactionException { + assert itemIds.length == itemCounts.length; + + DistributedTransaction transaction = null; + try { + String orderId = UUID.randomUUID().toString(); + + // Start a transaction + transaction = manager.start(); + + // Put the order info into the orders table + transaction.put( + Put.newBuilder() + .namespace("sample") + .table("orders") + .partitionKey(Key.ofInt("customer_id", customerId)) + .clusteringKey(Key.ofBigInt("timestamp", System.currentTimeMillis())) + .textValue("order_id", orderId) + .build()); + + int amount = 0; + for (int i = 0; i < itemIds.length; i++) { + int itemId = itemIds[i]; + int count = itemCounts[i]; + + // Put the order statement into the statements table + transaction.put( + Put.newBuilder() + .namespace("sample") + .table("statements") + .partitionKey(Key.ofText("order_id", orderId)) + 
.clusteringKey(Key.ofInt("item_id", itemId)) + .intValue("count", count) + .build()); + + // Retrieve the item info from the items table + Optional item = + transaction.get( + Get.newBuilder() + .namespace("sample") + .table("items") + .partitionKey(Key.ofInt("item_id", itemId)) + .build()); + if (!item.isPresent()) { + throw new RuntimeException("Item not found"); + } + + // Calculate the total amount + amount += item.get().getInt("price") * count; + } + + // Check if the credit total exceeds the credit limit after payment + Optional customer = + transaction.get( + Get.newBuilder() + .namespace("sample") + .table("customers") + .partitionKey(Key.ofInt("customer_id", customerId)) + .build()); + if (!customer.isPresent()) { + throw new RuntimeException("Customer not found"); + } + int creditLimit = customer.get().getInt("credit_limit"); + int creditTotal = customer.get().getInt("credit_total"); + if (creditTotal + amount > creditLimit) { + throw new RuntimeException("Credit limit exceeded"); + } + + // Update credit_total for the customer + transaction.put( + Put.newBuilder() + .namespace("sample") + .table("customers") + .partitionKey(Key.ofInt("customer_id", customerId)) + .intValue("credit_total", creditTotal + amount) + .build()); + + // Commit the transaction + transaction.commit(); + + // Return the order id + return String.format("{\"order_id\": \"%s\"}", orderId); + } catch (Exception e) { + if (transaction != null) { + // If an error occurs, abort the transaction + transaction.abort(); + } + throw e; + } + } + + private String getOrderJson(DistributedTransaction transaction, String orderId) + throws TransactionException { + // Retrieve the order info for the order ID from the orders table + Optional order = + transaction.get( + Get.newBuilder() + .namespace("sample") + .table("orders") + .indexKey(Key.ofText("order_id", orderId)) + .build()); + + if (!order.isPresent()) { + throw new RuntimeException("Order not found"); + } + + int customerId = order.get().getInt("customer_id"); + + // Retrieve the customer info for the specified customer ID from the customers table + Optional customer = + transaction.get( + Get.newBuilder() + .namespace("sample") + .table("customers") + .partitionKey(Key.ofInt("customer_id", customerId)) + .build()); + assert customer.isPresent(); + + // Retrieve the order statements for the order ID from the statements table + List statements = + transaction.scan( + Scan.newBuilder() + .namespace("sample") + .table("statements") + .partitionKey(Key.ofText("order_id", orderId)) + .build()); + + // Make the statements JSONs + List statementJsons = new ArrayList<>(); + int total = 0; + for (Result statement : statements) { + int itemId = statement.getInt("item_id"); + + // Retrieve the item data from the items table + Optional item = + transaction.get( + Get.newBuilder() + .namespace("sample") + .table("items") + .partitionKey(Key.ofInt("item_id", itemId)) + .build()); + + if (!item.isPresent()) { + throw new RuntimeException("Item not found"); + } + + int price = item.get().getInt("price"); + int count = statement.getInt("count"); + + statementJsons.add( + String.format( + "{\"item_id\": %d,\"item_name\": \"%s\",\"price\": %d,\"count\": %d,\"total\": %d}", + itemId, item.get().getText("name"), price, count, price * count)); + + total += price * count; + } + + // Return the order info as a JSON format + return String.format( + "{\"order_id\": \"%s\",\"timestamp\": %d,\"customer_id\": %d,\"customer_name\": \"%s\",\"statement\": [%s],\"total\": %d}", + orderId, + 
order.get().getBigInt("timestamp"), + customerId, + customer.get().getText("name"), + String.join(",", statementJsons), + total); + } + + public String getOrderByOrderId(String orderId) throws TransactionException { + DistributedTransaction transaction = null; + try { + // Start a transaction + transaction = manager.start(); + + // Get an order JSON for the specified order ID + String orderJson = getOrderJson(transaction, orderId); + + // Commit the transaction (even when the transaction is read-only, we need to commit) + transaction.commit(); + + // Return the order info as a JSON format + return String.format("{\"order\": %s}", orderJson); + } catch (Exception e) { + if (transaction != null) { + // If an error occurs, abort the transaction + transaction.abort(); + } + throw e; + } + } + + public String getOrdersByCustomerId(int customerId) throws TransactionException { + DistributedTransaction transaction = null; + try { + // Start a transaction + transaction = manager.start(); + + // Retrieve the order info for the customer ID from the orders table + List orders = + transaction.scan( + Scan.newBuilder() + .namespace("sample") + .table("orders") + .partitionKey(Key.ofInt("customer_id", customerId)) + .build()); + + // Make order JSONs for the orders of the customer + List orderJsons = new ArrayList<>(); + for (Result order : orders) { + orderJsons.add(getOrderJson(transaction, order.getText("order_id"))); + } + + // Commit the transaction (even when the transaction is read-only, we need to commit) + transaction.commit(); + + // Return the order info as a JSON format + return String.format("{\"order\": [%s]}", String.join(",", orderJsons)); + } catch (Exception e) { + if (transaction != null) { + // If an error occurs, abort the transaction + transaction.abort(); + } + throw e; + } + } + + public void repayment(int customerId, int amount) throws TransactionException { + DistributedTransaction transaction = null; + try { + // Start a transaction + transaction = manager.start(); + + // Retrieve the customer info for the specified customer ID from the customers table + Optional customer = + transaction.get( + Get.newBuilder() + .namespace("sample") + .table("customers") + .partitionKey(Key.ofInt("customer_id", customerId)) + .build()); + if (!customer.isPresent()) { + throw new RuntimeException("Customer not found"); + } + + int updatedCreditTotal = customer.get().getInt("credit_total") - amount; + + // Check if over repayment or not + if (updatedCreditTotal < 0) { + throw new RuntimeException("Over repayment"); + } + + // Reduce credit_total for the customer + transaction.put( + Put.newBuilder() + .namespace("sample") + .table("customers") + .partitionKey(Key.ofInt("customer_id", customerId)) + .intValue("credit_total", updatedCreditTotal) + .build()); + + // Commit the transaction + transaction.commit(); + } catch (Exception e) { + if (transaction != null) { + // If an error occurs, abort the transaction + transaction.abort(); + } + throw e; + } + } + + @Override + public void close() { + manager.close(); + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/GetCustomerInfoCommand.java b/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/GetCustomerInfoCommand.java new file mode 100644 index 00000000..8c397e12 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/GetCustomerInfoCommand.java @@ -0,0 +1,21 @@ +package sample.command; + +import java.util.concurrent.Callable; +import 
picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.Sample; + +@Command(name = "GetCustomerInfo", description = "Get customer information") +public class GetCustomerInfoCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Override + public Integer call() throws Exception { + try (Sample sample = new Sample()) { + System.out.println(sample.getCustomerInfo(customerId)); + } + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/GetOrderCommand.java b/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/GetOrderCommand.java new file mode 100644 index 00000000..abc94537 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/GetOrderCommand.java @@ -0,0 +1,21 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.Sample; + +@Command(name = "GetOrder", description = "Get order information by order ID") +public class GetOrderCommand implements Callable { + + @Parameters(index = "0", paramLabel = "ORDER_ID", description = "order ID") + private String orderId; + + @Override + public Integer call() throws Exception { + try (Sample sample = new Sample()) { + System.out.println(sample.getOrderByOrderId(orderId)); + } + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/GetOrdersCommand.java b/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/GetOrdersCommand.java new file mode 100644 index 00000000..428a1a83 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/GetOrdersCommand.java @@ -0,0 +1,21 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.Sample; + +@Command(name = "GetOrders", description = "Get order information by customer ID") +public class GetOrdersCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Override + public Integer call() throws Exception { + try (Sample sample = new Sample()) { + System.out.println(sample.getOrdersByCustomerId(customerId)); + } + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/LoadInitialDataCommand.java b/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/LoadInitialDataCommand.java new file mode 100644 index 00000000..32f9088e --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/LoadInitialDataCommand.java @@ -0,0 +1,17 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import sample.Sample; + +@Command(name = "LoadInitialData", description = "Load initial data") +public class LoadInitialDataCommand implements Callable { + + @Override + public Integer call() throws Exception { + try (Sample sample = new Sample()) { + sample.loadInitialData(); + } + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/PlaceOrderCommand.java b/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/PlaceOrderCommand.java new file mode 100644 index 00000000..929b50b4 --- /dev/null +++ 
b/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/PlaceOrderCommand.java @@ -0,0 +1,38 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.Sample; + +@Command(name = "PlaceOrder", description = "Place an order") +public class PlaceOrderCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Parameters( + index = "1", + paramLabel = "ORDERS", + description = "orders. The format is \":,:,...\"") + private String orders; + + @Override + public Integer call() throws Exception { + String[] split = orders.split(",", -1); + int[] itemIds = new int[split.length]; + int[] itemCounts = new int[split.length]; + + for (int i = 0; i < split.length; i++) { + String[] s = split[i].split(":", -1); + itemIds[i] = Integer.parseInt(s[0]); + itemCounts[i] = Integer.parseInt(s[1]); + } + + try (Sample sample = new Sample()) { + System.out.println(sample.placeOrder(customerId, itemIds, itemCounts)); + } + + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/RepaymentCommand.java b/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/RepaymentCommand.java new file mode 100644 index 00000000..868b1748 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/RepaymentCommand.java @@ -0,0 +1,24 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.Sample; + +@Command(name = "Repayment", description = "Repayment") +public class RepaymentCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Parameters(index = "1", paramLabel = "AMOUNT", description = "amount of the money for repayment") + private int amount; + + @Override + public Integer call() throws Exception { + try (Sample sample = new Sample()) { + sample.repayment(customerId, amount); + } + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/SampleCommand.java b/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/SampleCommand.java new file mode 100644 index 00000000..0dfdf690 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sample/src/main/java/sample/command/SampleCommand.java @@ -0,0 +1,37 @@ +package sample.command; + +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Option; + +@Command( + name = "bin/sample", + description = "Sample application for Microservice Transaction", + subcommands = { + LoadInitialDataCommand.class, + PlaceOrderCommand.class, + GetOrderCommand.class, + GetOrdersCommand.class, + GetCustomerInfoCommand.class, + RepaymentCommand.class + }) +public class SampleCommand implements Runnable { + + @Option( + names = {"-h", "--help"}, + usageHelp = true, + description = "Displays this help message and quits.", + defaultValue = "true") + private Boolean showHelp; + + @Override + public void run() { + if (showHelp) { + CommandLine.usage(this, System.out); + } + } + + public static void main(String[] args) { + new CommandLine(new SampleCommand()).execute(args); + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/README.md b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/README.md 
new file mode 100644 index 00000000..788e99d1 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/README.md @@ -0,0 +1,359 @@ +# Create a Sample Application That Uses ScalarDB SQL (JDBC) + +{% capture notice--info %} +**Note** + +This sample code is now deprecated. To use ScalarDB SQL, you need to use ScalarDB Cluster, which is available only in the Enterprise edition. For more information, see [ScalarDB Cluster](https://scalardb.scalar-labs.com/docs/latest/scalardb-cluster/). +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +This tutorial describes how to create a sample e-commerce application that uses ScalarDB SQL (JDBC). + +## Overview + +The sample e-commerce application shows how users can order and pay for items by using a line of credit. The use case described in this tutorial is the same as the basic [ScalarDB sample](../scalardb-sample/README.md) but uses ScalarDB SQL (JDBC). + +The database that you will be using in the sample application is Cassandra. Although Cassandra does not provide ACID transaction capabilities, you can make transactions ACID compliant by having your application connect to the database through ScalarDB SQL (JDBC). + +{% capture notice--info %} +**Note** + +Since the focus of the sample application is to demonstrate using ScalarDB SQL (JDBC), application-specific error handling, authentication processing, and similar functions are not included in the sample application. For details about exception handling in ScalarDB SQL (JDBC), see [Handle SQLException](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/jdbc-guide.md#handle-sqlexception). +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
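+The sample's source code (shown later in this diff) wraps everything behind command classes, but the underlying JDBC flow is the usual explicit-commit pattern sketched below. This sketch is not one of the sample's files: the JDBC URL format (`jdbc:scalardb:` followed by the path to `scalardb-sql.properties`) is an assumption here, and real applications should follow the exception-handling guidance linked in the note above.
+
+```java
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+public class JdbcFlowSketch {
+
+  public static void main(String[] args) throws SQLException {
+    // Assumption: the URL is "jdbc:scalardb:" plus the path to the SQL configuration file.
+    try (Connection connection =
+        DriverManager.getConnection("jdbc:scalardb:scalardb-sql.properties")) {
+      // Group the following statements into a single ScalarDB transaction.
+      connection.setAutoCommit(false);
+      try {
+        try (PreparedStatement statement =
+            connection.prepareStatement(
+                "SELECT name, credit_total FROM sample.customers WHERE customer_id = ?")) {
+          statement.setInt(1, 1);
+          try (ResultSet resultSet = statement.executeQuery()) {
+            while (resultSet.next()) {
+              System.out.println(resultSet.getString("name"));
+            }
+          }
+        }
+        // Commit the transaction (needed even for read-only transactions).
+        connection.commit();
+      } catch (SQLException e) {
+        // Roll back on failure; see "Handle SQLException" for how to treat transient errors.
+        connection.rollback();
+        throw e;
+      }
+    }
+  }
+}
+```
+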
+ +### What you can do in this sample application + +The sample application supports the following types of transactions: + +- Get customer information. +- Place an order by using a line of credit. + - Checks if the cost of the order is below the customer's credit limit. + - If the check passes, records the order history and updates the amount the customer has spent. +- Get order information by order ID. +- Get order information by customer ID. +- Make a payment. + - Reduces the amount the customer has spent. + +## Prerequisites + +- One of the following Java Development Kits (JDKs): + - [Oracle JDK](https://www.oracle.com/java/technologies/downloads/) LTS version (8, 11, or 17) + - [OpenJDK](https://openjdk.org/install/) LTS version (8, 11, or 17) +- [Docker](https://www.docker.com/get-started/) 20.10 or later with [Docker Compose](https://docs.docker.com/compose/install/) V2 or later + +{% capture notice--info %} +**Note** + +We recommend using the LTS versions mentioned above, but other non-LTS versions may work. + +In addition, other JDKs should work with ScalarDB, but we haven't tested them. +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
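+If you want to confirm your environment before building, you can check the JDK and Docker Compose versions first. These are standard JDK and Docker commands, not something provided by this sample:
+
+```console
+$ java -version
+$ docker-compose --version
+```
+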
+ +In addition, you need access to the [ScalarDB SQL repository on GitHub](https://github.com/scalar-labs/scalardb-sql) and the [packages in the ScalarDB SQL repository](https://github.com/orgs/scalar-labs/packages?repo_name=scalardb-sql), which are private. The packages and repository are available only to those who are using ScalarDB Enterprise. If you need a license for ScalarDB Enterprise, please [contact us](https://scalar-labs.com/contact_us/). + +After confirming that you have access to the ScalarDB SQL repository and its packages, you will need to set your GitHub username and your personal access token. To specify these properties, you can do one of the following: + +
+
+ + +
+ +
+ +Specify the properties with the `-P` option by running the `./gradlew` command as follows, replacing `<YOUR_GITHUB_USERNAME>` with your GitHub username and `<YOUR_PERSONAL_ACCESS_TOKEN>` with your personal access token: + +```console +$ ./gradlew run -Pgpr.user=<YOUR_GITHUB_USERNAME> -Pgpr.key=<YOUR_PERSONAL_ACCESS_TOKEN> +``` + +
+
+ +Specify the properties as environment variables by running the following commands, replacing `<YOUR_GITHUB_USERNAME>` with your GitHub username and `<YOUR_PERSONAL_ACCESS_TOKEN>` with your personal access token: + +```console +$ export USERNAME=<YOUR_GITHUB_USERNAME> +$ export TOKEN=<YOUR_PERSONAL_ACCESS_TOKEN> +``` + +
+
+ +For more details, see [how to install ScalarDB SQL](https://github.com/scalar-labs/scalardb-sql#install). + +## Set up ScalarDB SQL (JDBC) + +The following sections describe how to set up the sample e-commerce application. + +### Clone the ScalarDB samples repository + +Open **Terminal**, then clone the ScalarDB samples repository by running the following command: + +```console +$ git clone https://github.com/scalar-labs/scalardb-samples +``` + +Then, go to the directory that contains the sample application by running the following command: + +```console +$ cd scalardb-samples/scalardb-sql-jdbc-sample +``` + +### Start Cassandra + +Cassandra is already configured for the sample application, as shown in [`scalardb-sql.properties`](scalardb-sql.properties). + +To start Cassandra, which is included in the Docker container for the sample application, make sure Docker is running and then run the following command: + +```console +$ docker-compose up -d +``` + +{% capture notice--info %} +**Note** + +Starting the Docker container may take more than one minute depending on your development environment. +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
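+Cassandra can take a little while to accept connections. If you want to confirm that the container is up before loading the schema, you can check its status and logs with standard Docker Compose commands (not part of the sample itself); the service name `cassandra` comes from this sample's `docker-compose.yml`:
+
+```console
+$ docker-compose ps
+$ docker-compose logs cassandra
+```
+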
+ +### Load the schema + +The database schema (the method in which the data will be organized) for the sample application has already been defined in [`schema.sql`](schema.sql). + +To apply the schema, go to the [ScalarDB SQL Releases](https://github.com/scalar-labs/scalardb-sql/releases) page and download the CLI tool (`scalardb-sql-cli--all.jar`) for the version of ScalarDB SQL that you want to use to the `scalardb-samples/scalardb-sql-jdbc-sample` folder. + +Then, run the following command, replacing `` with the version of the ScalarDB Schema Loader that you downloaded: + +```console +$ java -jar scalardb-sql-cli--all.jar --config scalardb-sql.properties --file schema.sql +``` + +#### Schema details + +As shown in [`schema.sql`](schema.sql) for the sample application, all the tables are created in the `sample` namespace. + +- `sample.customers`: a table that manages customers' information + - `credit_limit`: the maximum amount of money a lender will allow each customer to spend when using a line of credit + - `credit_total`: the amount of money that each customer has already spent by using their line of credit +- `sample.orders`: a table that manages order information +- `sample.statements`: a table that manages order statement information +- `sample.items`: a table that manages information of items to be ordered + +The Entity Relationship Diagram for the schema is as follows: + +![ERD](images/ERD.png) + +### Load the initial data + +After the Docker container has started, load the initial data by running the following command: + +```console +$ ./gradlew run --args="LoadInitialData" +``` + +After the initial data has loaded, the following records should be stored in the tables. + +**`sample.customers` table** + +| customer_id | name | credit_limit | credit_total | +|-------------|---------------|--------------|--------------| +| 1 | Yamada Taro | 10000 | 0 | +| 2 | Yamada Hanako | 10000 | 0 | +| 3 | Suzuki Ichiro | 10000 | 0 | + +**`sample.items` table** + +| item_id | name | price | +|---------|--------|-------| +| 1 | Apple | 1000 | +| 2 | Orange | 2000 | +| 3 | Grape | 2500 | +| 4 | Mango | 5000 | +| 5 | Melon | 3000 | + +## Execute transactions and retrieve data in the sample application + +The following sections describe how to execute transactions and retrieve data in the sample e-commerce application. + +### Get customer information + +Start with getting information about the customer whose ID is `1` by running the following command: + +```console +$ ./gradlew run --args="GetCustomerInfo 1" +``` + +You should see the following output: + +```console +... +{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 0} +... +``` + +### Place an order + +Then, have customer ID `1` place an order for three apples and two oranges by running the following command: + +{% capture notice--info %} +**Note** + +The order format in this command is `./gradlew run --args="PlaceOrder :,:,..."`. +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +```console +$ ./gradlew run --args="PlaceOrder 1 1:3,2:2" +``` + +You should see a similar output as below, with a different UUID for `order_id`, which confirms that the order was successful: + +```console +... +{"order_id": "454f9c97-f456-44fd-96da-f527187fe39b"} +... +``` + +### Check order details + +Check details about the order by running the following command, replacing `` with the UUID for the `order_id` that was shown after running the previous command: + +```console +$ ./gradlew run --args="GetOrder " +``` + +You should see a similar output as below, with different UUIDs for `order_id` and `timestamp`: + +```console +... +{"order": {"order_id": "454f9c97-f456-44fd-96da-f527187fe39b","timestamp": 1685602722821,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 1, "name": "Apple", "price": 1000, "count": 3},{"item_id": 2, "name": "Orange", "price": 2000, "count": 2}],"total": 7000}} +... +``` + +### Place another order + +Place an order for one melon that uses the remaining amount in `credit_total` for customer ID `1` by running the following command: + +```console +$ ./gradlew run --args="PlaceOrder 1 5:1" +``` + +You should see a similar output as below, with a different UUID for `order_id`, which confirms that the order was successful: + +```console +... +{"order_id": "3f40c718-59ec-48aa-a6fe-2fdaf12ad094"} +... +``` + +### Check order history + +Get the history of all orders for customer ID `1` by running the following command: + +```console +$ ./gradlew run --args="GetOrders 1" +``` + +You should see a similar output as below, with different UUIDs for `order_id` and `timestamp`, which shows the history of all orders for customer ID `1` in descending order by timestamp: + +```console +... +{"order": [{"order_id": "454f9c97-f456-44fd-96da-f527187fe39b","timestamp": 1685602722821,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 1, "name": "Apple", "price": 1000, "count": 3},{"item_id": 2, "name": "Orange", "price": 2000, "count": 2}],"total": 7000},{"order_id": "3f40c718-59ec-48aa-a6fe-2fdaf12ad094","timestamp": 1685602811718,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 5, "name": "Melon", "price": 3000, "count": 1}],"total": 3000}]} +... +``` + +### Check credit total + +Get the credit total for customer ID `1` by running the following command: + +```console +$ ./gradlew run --args="GetCustomerInfo 1" +``` + +You should see the following output, which shows that customer ID `1` has reached their `credit_limit` in `credit_total` and cannot place anymore orders: + +```console +... +{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 10000} +... +``` + +Try to place an order for one grape and one mango by running the following command: + +```console +$ ./gradlew run --args="PlaceOrder 1 3:1,4:1" +``` + +You should see the following output, which shows that the order failed because the `credit_total` amount would exceed the `credit_limit` amount: + +```console +... 
+java.lang.RuntimeException: Credit limit exceeded + at sample.Sample.placeOrder(Sample.java:184) + at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:32) + at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:8) + at picocli.CommandLine.executeUserObject(CommandLine.java:2041) + at picocli.CommandLine.access$1500(CommandLine.java:148) + at picocli.CommandLine$RunLast.executeUserObjectOfLastSubcommandWithSameParent(CommandLine.java:2461) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2453) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2415) + at picocli.CommandLine$AbstractParseResultHandler.execute(CommandLine.java:2273) + at picocli.CommandLine$RunLast.execute(CommandLine.java:2417) + at picocli.CommandLine.execute(CommandLine.java:2170) + at sample.command.SampleCommand.main(SampleCommand.java:35) +... +``` + +### Make a payment + +To continue making orders, customer ID `1` must make a payment to reduce the `credit_total` amount. + +Make a payment by running the following command: + +```console +$ ./gradlew run --args="Repayment 1 8000" +``` + +Then, check the `credit_total` amount for customer ID `1` by running the following command: + +```console +$ ./gradlew run --args="GetCustomerInfo 1" +``` + +You should see the following output, which shows that a payment was applied to customer ID `1`, reducing the `credit_total` amount: + +```console +... +{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 2000} +... +``` + +Now that customer ID `1` has made a payment, place an order for one grape and one melon by running the following command: + +```console +$ ./gradlew run --args="PlaceOrder 1 3:1,4:1" +``` + +You should see a similar output as below, with a different UUID for `order_id`, which confirms that the order was successful: + +```console +... +{"order_id": "fb71279d-88ea-4974-a102-0ec4e7d65e25"} +... 
+``` + +## Stop the sample application + +To stop the sample application, stop the Docker container by running the following command: + +```console +$ docker-compose down +``` diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/Sample.class b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/Sample.class new file mode 100644 index 00000000..c9e061f4 Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/Sample.class differ diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/GetCustomerInfoCommand.class b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/GetCustomerInfoCommand.class new file mode 100644 index 00000000..273ee4ae Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/GetCustomerInfoCommand.class differ diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/GetOrderCommand.class b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/GetOrderCommand.class new file mode 100644 index 00000000..ad0354ac Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/GetOrderCommand.class differ diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/GetOrdersCommand.class b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/GetOrdersCommand.class new file mode 100644 index 00000000..b0d17e76 Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/GetOrdersCommand.class differ diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/LoadInitialDataCommand.class b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/LoadInitialDataCommand.class new file mode 100644 index 00000000..26f26ea5 Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/LoadInitialDataCommand.class differ diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/PlaceOrderCommand.class b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/PlaceOrderCommand.class new file mode 100644 index 00000000..9b9c96c1 Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/PlaceOrderCommand.class differ diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/RepaymentCommand.class b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/RepaymentCommand.class new file mode 100644 index 00000000..dd4f98df Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/RepaymentCommand.class differ diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/SampleCommand.class b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/SampleCommand.class new file mode 100644 index 00000000..ecc0fcee Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/bin/main/sample/command/SampleCommand.class differ diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/build.gradle b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/build.gradle new file mode 100644 index 00000000..bf59be4e --- /dev/null +++ 
b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/build.gradle @@ -0,0 +1,33 @@ +plugins { + id 'java' + id 'application' +} + +group 'org.sample' +version '1.0-SNAPSHOT' + +repositories { + mavenCentral() + maven { + url = uri("https://maven.pkg.github.com/scalar-labs/scalardb-sql") + credentials { + username = project.findProperty("gpr.user") ?: System.getenv("USERNAME") + password = project.findProperty("gpr.key") ?: System.getenv("TOKEN") + } + } +} + +dependencies { + implementation 'com.scalar-labs:scalardb-sql-jdbc:3.9.0' + implementation 'com.scalar-labs:scalardb-sql-direct-mode:3.9.0' + implementation 'info.picocli:picocli:4.7.1' +} + +application { + mainClassName = 'sample.command.SampleCommand' +} + +archivesBaseName = "sample" + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/docker-compose.yml b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/docker-compose.yml new file mode 100644 index 00000000..2eb12b92 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/docker-compose.yml @@ -0,0 +1,7 @@ +version: "3.5" +services: + cassandra: + image: cassandra:3.11 + container_name: "cassandra-1" + ports: + - "9042:9042" diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/gradle/wrapper/gradle-wrapper.jar b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 00000000..41d9927a Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/gradle/wrapper/gradle-wrapper.jar differ diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/gradle/wrapper/gradle-wrapper.properties b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000..070cb702 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-7.6-bin.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/gradlew b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/gradlew new file mode 100755 index 00000000..1b6c7873 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/gradlew @@ -0,0 +1,234 @@ +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. 
If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# +############################################################################## + +# Attempt to set APP_HOME + +# Resolve links: $0 may be a link +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit + +APP_NAME="Gradle" +APP_BASE_NAME=${0##*/} + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 
+ +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. + +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + +# Collect all arguments for the java command; +# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of +# shell script including quotes and variable substitutions, so put them in +# double quotes to make sure that they get re-expanded; and +# * put everything else in single quotes, so that it's not re-expanded. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + +exec "$JAVACMD" "$@" diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/gradlew.bat b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/gradlew.bat new file mode 100644 index 00000000..107acd32 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/gradlew.bat @@ -0,0 +1,89 @@ +@rem +@rem Copyright 2015 the original author or authors. 
+@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/images/ERD.png b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/images/ERD.png new file mode 100644 index 00000000..1a6d13c5 Binary files /dev/null and b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/images/ERD.png differ diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/scalardb-sql.properties b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/scalardb-sql.properties new file mode 100644 index 00000000..5461f78f --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/scalardb-sql.properties @@ -0,0 +1,6 @@ +scalar.db.sql.connection_mode=direct + +scalar.db.storage=cassandra +scalar.db.contact_points=localhost +scalar.db.username=cassandra +scalar.db.password=cassandra diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/schema.sql b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/schema.sql new file mode 100644 index 00000000..d3ad7fc5 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/schema.sql @@ -0,0 +1,32 @@ +CREATE COORDINATOR TABLES IF NOT EXIST; + +CREATE NAMESPACE IF NOT EXISTS sample; + +CREATE TABLE IF NOT EXISTS sample.customers ( + customer_id INT PRIMARY KEY, + name TEXT, + credit_limit INT, + credit_total INT +); + +CREATE TABLE IF NOT EXISTS sample.orders ( + customer_id INT, + timestamp BIGINT, + order_id TEXT, + PRIMARY KEY (customer_id, timestamp) +); + +CREATE INDEX IF NOT EXISTS ON sample.orders (order_id); + +CREATE TABLE IF NOT EXISTS sample.statements ( + order_id TEXT, + item_id INT, + count INT, + PRIMARY KEY (order_id, item_id) +); + +CREATE TABLE IF NOT EXISTS sample.items ( + item_id INT PRIMARY KEY, + name TEXT, + price INT +); diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/settings.gradle b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/settings.gradle new file mode 100644 index 00000000..096a9cae --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/settings.gradle @@ -0,0 +1 @@ +rootProject.name = 'scalardb-sql-jdbc-sample' diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/Sample.java b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/Sample.java new file mode 100644 index 00000000..1d317a4f --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/Sample.java @@ -0,0 +1,383 @@ +package sample; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +public class Sample { + + public void loadInitialData() throws SQLException { + try (Connection connection = getConnection()) { + try { + loadCustomerIfNotExists(connection, 1, "Yamada Taro", 10000, 0); + loadCustomerIfNotExists(connection, 2, "Yamada Hanako", 10000, 0); + loadCustomerIfNotExists(connection, 3, "Suzuki Ichiro", 10000, 0); + loadItemIfNotExists(connection, 1, "Apple", 1000); + loadItemIfNotExists(connection, 2, "Orange", 2000); + loadItemIfNotExists(connection, 3, "Grape", 2500); + loadItemIfNotExists(connection, 4, "Mango", 5000); + loadItemIfNotExists(connection, 5, "Melon", 3000); + + // Commit the transaction + connection.commit(); + } catch (Exception e) { + connection.rollback(); + throw e; + } + } + } + + private void 
loadCustomerIfNotExists( + Connection connection, int customerId, String name, int creditLimit, int creditTotal) + throws SQLException { + try (PreparedStatement preparedStatementForSelect = + connection.prepareStatement("SELECT * FROM sample.customers WHERE customer_id = ?"); + PreparedStatement preparedStatementForInsert = + connection.prepareStatement( + "INSERT INTO sample.customers (customer_id, name, credit_limit, credit_total) VALUES (?, ?, ?, ?)")) { + + preparedStatementForSelect.setInt(1, customerId); + try (ResultSet resultSet = preparedStatementForSelect.executeQuery()) { + if (resultSet.next()) { + // If the customer info for the specified customer ID already exists, do nothing + return; + } + } + + preparedStatementForInsert.setInt(1, customerId); + preparedStatementForInsert.setString(2, name); + preparedStatementForInsert.setInt(3, creditLimit); + preparedStatementForInsert.setInt(4, creditTotal); + preparedStatementForInsert.executeUpdate(); + } + } + + private void loadItemIfNotExists(Connection connection, int itemId, String name, int price) + throws SQLException { + try (PreparedStatement preparedStatementForSelect = + connection.prepareStatement("SELECT * FROM sample.items WHERE item_id = ?"); + PreparedStatement preparedStatementForInsert = + connection.prepareStatement( + "INSERT INTO sample.items (item_id, name, price) VALUES (?, ?, ?)")) { + + preparedStatementForSelect.setInt(1, itemId); + try (ResultSet resultSet = preparedStatementForSelect.executeQuery()) { + if (resultSet.next()) { + // If the item info for the specified item ID already exists, do nothing + return; + } + } + + preparedStatementForInsert.setInt(1, itemId); + preparedStatementForInsert.setString(2, name); + preparedStatementForInsert.setInt(3, price); + preparedStatementForInsert.executeUpdate(); + } + } + + public String getCustomerInfo(int customerId) throws SQLException { + try (Connection connection = getConnection()) { + try (PreparedStatement preparedStatement = + connection.prepareStatement("SELECT * FROM sample.customers WHERE customer_id = ?")) { + preparedStatement.setInt(1, customerId); + + String name; + int creditLimit; + int creditTotal; + try (ResultSet resultSet = preparedStatement.executeQuery()) { + if (!resultSet.next()) { + // If the customer info the specified customer ID doesn't exist, throw an exception + throw new RuntimeException("Customer not found"); + } + + name = resultSet.getString("name"); + creditLimit = resultSet.getInt("credit_limit"); + creditTotal = resultSet.getInt("credit_total"); + } + + // Commit the transaction + connection.commit(); + + // Return the customer info as a JSON format + return String.format( + "{\"id\": %d, \"name\": \"%s\", \"credit_limit\": %d, \"credit_total\": %d}", + customerId, name, creditLimit, creditTotal); + } catch (Exception e) { + connection.rollback(); + throw e; + } + } + } + + public String placeOrder(int customerId, int[] itemIds, int[] itemCounts) throws SQLException { + assert itemIds.length == itemCounts.length; + + try (Connection connection = getConnection()) { + try { + String orderId = UUID.randomUUID().toString(); + + // Put the order info into the orders table + try (PreparedStatement preparedStatement = + connection.prepareStatement( + "INSERT INTO sample.orders (customer_id, order_id, timestamp) VALUES (?, ?, ?)")) { + preparedStatement.setInt(1, customerId); + preparedStatement.setString(2, orderId); + preparedStatement.setLong(3, System.currentTimeMillis()); + preparedStatement.executeUpdate(); + } + + int 
amount = 0; + for (int i = 0; i < itemIds.length; i++) { + int itemId = itemIds[i]; + int count = itemCounts[i]; + + // Put the order statement into the statements table + try (PreparedStatement preparedStatement = + connection.prepareStatement( + "INSERT INTO sample.statements (order_id, item_id, count) VALUES (?, ?, ?)")) { + preparedStatement.setString(1, orderId); + preparedStatement.setInt(2, itemId); + preparedStatement.setInt(3, count); + preparedStatement.executeUpdate(); + } + + // Retrieve the item info from the items table + try (PreparedStatement preparedStatement = + connection.prepareStatement("SELECT * FROM sample.items WHERE item_id = ?")) { + preparedStatement.setInt(1, itemId); + + try (ResultSet resultSet = preparedStatement.executeQuery()) { + if (!resultSet.next()) { + // If the item info for the specified item ID doesn't exist, throw an exception + throw new RuntimeException("Item not found"); + } + + // Calculate the total amount + amount += resultSet.getInt("price") * count; + } + } + } + + // Check if the credit total exceeds the credit limit after payment + int updatedCreditTotal; + try (PreparedStatement preparedStatement = + connection.prepareStatement("SELECT * FROM sample.customers WHERE customer_id = ?")) { + preparedStatement.setInt(1, customerId); + + try (ResultSet resultSet = preparedStatement.executeQuery()) { + if (!resultSet.next()) { + // If the customer info for the specified customer ID doesn't exist, throw an + // exception + throw new RuntimeException("Customer not found"); + } + + int creditLimit = resultSet.getInt("credit_limit"); + int creditTotal = resultSet.getInt("credit_total"); + updatedCreditTotal = creditTotal + amount; + + if (updatedCreditTotal > creditLimit) { + throw new RuntimeException("Credit limit exceeded"); + } + } + } + + // Update credit_total for the customer + try (PreparedStatement preparedStatement = + connection.prepareStatement( + "UPDATE sample.customers SET credit_total = ? 
WHERE customer_id = ?")) { + preparedStatement.setInt(1, updatedCreditTotal); + preparedStatement.setInt(2, customerId); + preparedStatement.executeUpdate(); + } + + // Commit the transaction + connection.commit(); + + // Return the order id + return String.format("{\"order_id\": \"%s\"}", orderId); + } catch (Exception e) { + connection.rollback(); + throw e; + } + } + } + + private String getOrderJson(Connection connection, String orderId) throws SQLException { + // Retrieve the order info for the order ID from the orders table + int customerId; + long timestamp; + try (PreparedStatement preparedStatement = + connection.prepareStatement("SELECT * FROM sample.orders WHERE order_id = ?")) { + preparedStatement.setString(1, orderId); + + try (ResultSet resultSet = preparedStatement.executeQuery()) { + if (!resultSet.next()) { + // If the order info for the specified order ID doesn't exist, throw an exception + throw new RuntimeException("Order not found"); + } + + customerId = resultSet.getInt("customer_id"); + timestamp = resultSet.getLong("timestamp"); + } + } + + // Retrieve the customer info for the specified customer ID from the customers table + String customerName; + try (PreparedStatement preparedStatement = + connection.prepareStatement("SELECT * FROM sample.customers WHERE customer_id = ?")) { + preparedStatement.setInt(1, customerId); + + try (ResultSet resultSet = preparedStatement.executeQuery()) { + if (!resultSet.next()) { + // If the customer info for the specified customer ID doesn't exist, throw an exception + throw new RuntimeException("Customer not found"); + } + + customerName = resultSet.getString("name"); + } + } + + List statementJsons = new ArrayList<>(); + int total = 0; + + // Retrieve the order statements for the order ID from the statements table + try (PreparedStatement preparedStatementForStatements = + connection.prepareStatement("SELECT * FROM sample.statements WHERE order_id = ?")) { + preparedStatementForStatements.setString(1, orderId); + + try (ResultSet resultSetForStatements = preparedStatementForStatements.executeQuery()) { + while (resultSetForStatements.next()) { + int itemId = resultSetForStatements.getInt("item_id"); + + // Retrieve the item data from the items table + try (PreparedStatement preparedStatementForItems = + connection.prepareStatement("SELECT * FROM sample.items WHERE item_id = ?")) { + preparedStatementForItems.setInt(1, itemId); + + try (ResultSet resultSetForItems = preparedStatementForItems.executeQuery()) { + if (!resultSetForItems.next()) { + throw new RuntimeException("Item not found"); + } + + int price = resultSetForItems.getInt("price"); + int count = resultSetForStatements.getInt("count"); + + // Make the statements JSON + statementJsons.add( + String.format( + "{\"item_id\": %d, \"name\": \"%s\", \"price\": %d, \"count\": %d}", + itemId, resultSetForItems.getString("name"), price, count)); + + // Calculate the total amount + total += price * count; + } + } + } + } + + // Return the order info as a JSON format + return String.format( + "{\"order_id\": \"%s\",\"timestamp\": %d,\"customer_id\": %d,\"customer_name\": \"%s\",\"statement\": [%s],\"total\": %d}", + orderId, timestamp, customerId, customerName, String.join(",", statementJsons), total); + } + } + + public String getOrderByOrderId(String orderId) throws SQLException { + try (Connection connection = getConnection()) { + try { + // Get an order JSON for the specified order ID + String orderJson = getOrderJson(connection, orderId); + + // Commit the transaction + 
connection.commit(); + + // Return the order info as a JSON format + return String.format("{\"order\": %s}", orderJson); + } catch (Exception e) { + connection.rollback(); + throw e; + } + } + } + + public String getOrdersByCustomerId(int customerId) throws SQLException { + try (Connection connection = getConnection()) { + // Retrieve the order info for the customer ID from the orders table + try (PreparedStatement preparedStatement = + connection.prepareStatement("SELECT * FROM sample.orders WHERE customer_id = ?")) { + preparedStatement.setInt(1, customerId); + + List orderJsons = new ArrayList<>(); + try (ResultSet resultSet = preparedStatement.executeQuery()) { + // Make order JSONs for the orders of the customer + while (resultSet.next()) { + orderJsons.add(getOrderJson(connection, resultSet.getString("order_id"))); + } + } + + // Commit the transaction + connection.commit(); + + // Return the order info as a JSON format + return String.format("{\"order\": [%s]}", String.join(",", orderJsons)); + } catch (Exception e) { + connection.rollback(); + throw e; + } + } + } + + public void repayment(int customerId, int amount) throws SQLException { + try (Connection connection = getConnection()) { + try { + // Retrieve the customer info for the specified customer ID from the customers table + int updatedCreditTotal; + try (PreparedStatement preparedStatement = + connection.prepareStatement("SELECT * FROM sample.customers WHERE customer_id = ?")) { + preparedStatement.setInt(1, customerId); + + try (ResultSet resultSet = preparedStatement.executeQuery()) { + if (!resultSet.next()) { + // If the customer info for the specified customer ID doesn't exist, throw an + // exception + throw new RuntimeException("Customer not found"); + } + + updatedCreditTotal = resultSet.getInt("credit_total") - amount; + + // Check if over repayment or not + if (updatedCreditTotal < 0) { + throw new RuntimeException("Over repayment"); + } + } + } + + // Reduce credit_total for the customer + try (PreparedStatement preparedStatement = + connection.prepareStatement( + "UPDATE sample.customers SET credit_total = ? 
WHERE customer_id = ?")) { + preparedStatement.setInt(1, updatedCreditTotal); + preparedStatement.setInt(2, customerId); + preparedStatement.executeUpdate(); + } + + // Commit the transaction + connection.commit(); + } catch (Exception e) { + connection.rollback(); + throw e; + } + } + } + + private Connection getConnection() throws SQLException { + Connection connection = DriverManager.getConnection("jdbc:scalardb:scalardb-sql.properties"); + connection.setAutoCommit(false); + return connection; + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/GetCustomerInfoCommand.java b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/GetCustomerInfoCommand.java new file mode 100644 index 00000000..27c0210f --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/GetCustomerInfoCommand.java @@ -0,0 +1,19 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.Sample; + +@Command(name = "GetCustomerInfo", description = "Get customer information") +public class GetCustomerInfoCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Override + public Integer call() throws Exception { + System.out.println(new Sample().getCustomerInfo(customerId)); + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/GetOrderCommand.java b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/GetOrderCommand.java new file mode 100644 index 00000000..fd4b8f9c --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/GetOrderCommand.java @@ -0,0 +1,19 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.Sample; + +@Command(name = "GetOrder", description = "Get order information by order ID") +public class GetOrderCommand implements Callable { + + @Parameters(index = "0", paramLabel = "ORDER_ID", description = "order ID") + private String orderId; + + @Override + public Integer call() throws Exception { + System.out.println(new Sample().getOrderByOrderId(orderId)); + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/GetOrdersCommand.java b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/GetOrdersCommand.java new file mode 100644 index 00000000..38bd1709 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/GetOrdersCommand.java @@ -0,0 +1,19 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.Sample; + +@Command(name = "GetOrders", description = "Get order information by customer ID") +public class GetOrdersCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Override + public Integer call() throws Exception { + System.out.println(new Sample().getOrdersByCustomerId(customerId)); + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/LoadInitialDataCommand.java 
b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/LoadInitialDataCommand.java new file mode 100644 index 00000000..a7db43b3 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/LoadInitialDataCommand.java @@ -0,0 +1,15 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import sample.Sample; + +@Command(name = "LoadInitialData", description = "Load initial data") +public class LoadInitialDataCommand implements Callable { + + @Override + public Integer call() throws Exception { + new Sample().loadInitialData(); + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/PlaceOrderCommand.java b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/PlaceOrderCommand.java new file mode 100644 index 00000000..cc18b551 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/PlaceOrderCommand.java @@ -0,0 +1,36 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.Sample; + +@Command(name = "PlaceOrder", description = "Place an order") +public class PlaceOrderCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Parameters( + index = "1", + paramLabel = "ORDERS", + description = "orders. The format is \":,:,...\"") + private String orders; + + @Override + public Integer call() throws Exception { + String[] split = orders.split(",", -1); + int[] itemIds = new int[split.length]; + int[] itemCounts = new int[split.length]; + + for (int i = 0; i < split.length; i++) { + String[] s = split[i].split(":", -1); + itemIds[i] = Integer.parseInt(s[0]); + itemCounts[i] = Integer.parseInt(s[1]); + } + + System.out.println(new Sample().placeOrder(customerId, itemIds, itemCounts)); + + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/RepaymentCommand.java b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/RepaymentCommand.java new file mode 100644 index 00000000..1bf4f583 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/RepaymentCommand.java @@ -0,0 +1,22 @@ +package sample.command; + +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.Sample; + +@Command(name = "Repayment", description = "Repayment") +public class RepaymentCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Parameters(index = "1", paramLabel = "AMOUNT", description = "amount of the money for repayment") + private int amount; + + @Override + public Integer call() throws Exception { + new Sample().repayment(customerId, amount); + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/SampleCommand.java b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/SampleCommand.java new file mode 100644 index 00000000..0dfdf690 --- /dev/null +++ b/docs/3.12/scalardb-samples/scalardb-sql-jdbc-sample/src/main/java/sample/command/SampleCommand.java @@ -0,0 +1,37 @@ +package sample.command; + +import 
picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Option; + +@Command( + name = "bin/sample", + description = "Sample application for Microservice Transaction", + subcommands = { + LoadInitialDataCommand.class, + PlaceOrderCommand.class, + GetOrderCommand.class, + GetOrdersCommand.class, + GetCustomerInfoCommand.class, + RepaymentCommand.class + }) +public class SampleCommand implements Runnable { + + @Option( + names = {"-h", "--help"}, + usageHelp = true, + description = "Displays this help message and quits.", + defaultValue = "true") + private Boolean showHelp; + + @Override + public void run() { + if (showHelp) { + CommandLine.usage(this, System.out); + } + } + + public static void main(String[] args) { + new CommandLine(new SampleCommand()).execute(args); + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/README.md b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/README.md new file mode 100644 index 00000000..f7cd53ca --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/README.md @@ -0,0 +1,532 @@ +# Sample application of Spring Data JDBC for ScalarDB with Microservice Transactions + +This tutorial describes how to create a sample Spring Boot application for microservice transactions by using Spring Data JDBC for ScalarDB. + +For details about these features, see [Two-phase Commit Transactions](https://github.com/scalar-labs/scalardb/tree/master/docs/two-phase-commit-transactions.md) and [Guide of Spring Data JDBC for ScalarDB](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/spring-data-guide.md). + +## Prerequisites + +- Java (OpenJDK 8 or higher) +- Gradle +- Docker, Docker Compose + +In addition, you need access to the [ScalarDB SQL GitHub repository](https://github.com/scalar-labs/scalardb-sql) and [Packages in ScalarDB SQL repository](https://github.com/orgs/scalar-labs/packages?repo_name=scalardb-sql). +These repositories are available only to users with a commercial license and permission. +To get a license and permission, please [contact us](https://scalar-labs.com/contact_us/). + +You also need the `gpr.user` property for your GitHub username and the `gpr.key` property for your personal access token. +You must either add these properties in `~/.gradle/gradle.properties` or specify the properties by using the `-P` option when running the `./gradlew` command as follows: + +```shell +$ ./gradlew run ... -Pgpr.user= -Pgpr.key= +```` + +Or you can also use environment variables, `USERNAME` for your GitHub username and `TOKEN` for your personal access token. + +```shell +$ export USERNAME= +$ export TOKEN= +``` + +For more details, see [Install - ScalarDB SQL](https://github.com/scalar-labs/scalardb-sql#install). + +## Sample application + +### Overview + +This tutorial describes how to create a Spring Boot sample application for microservice transactions for the same use case as [ScalarDB Sample](https://github.com/scalar-labs/scalardb-samples/tree/main/scalardb-sample) but by using Two-phase Commit Transactions in ScalarDB. + +There are two microservices called the *Customer Service* and the *Order Service* based on the [*Database-per-service* pattern](https://microservices.io/patterns/data/database-per-service.html) in this sample application. + +The Customer Service manages customers' information including credit card information like a credit limit and a credit total. 
+The Order Service is responsible for order operations like placing an order and getting order histories.
+Each service has gRPC endpoints. Clients call the endpoints, and the services also call each other's endpoints.
+The Customer Service and the Order Service use MySQL and Cassandra through ScalarDB, respectively.
+
+![Overview](images/overview.png)
+
+Note that both services access a small coordinator database used for the Consensus Commit protocol.
+The coordinator database is service-independent and exists for managing transaction metadata for Consensus Commit in a highly available manner. We believe the architecture does not spoil the benefits of the database-per-service pattern.
+*NOTE: We also plan to create a microservice container for the coordinator database to truly achieve the database-per-service pattern.*
+
+In this sample application, for ease of setup and explanation, we co-locate the coordinator database in the same Cassandra instance as the Order Service, but of course, the coordinator database can be managed as a separate database.
+
+Also, note that application-specific error handling, authentication processing, etc., are omitted in the sample application since it focuses on explaining how to use ScalarDB.
+Please see [this document](https://github.com/scalar-labs/scalardb/blob/master/docs/two-phase-commit-transactions.md#handle-exceptions) for details about how to handle exceptions in ScalarDB.
+
+Additionally, this sample application assumes that each service has only one container so that request routing between the services does not need to be considered.
+However, in production, each service typically has multiple servers (or hosts) for scalability and availability, so please consider using ScalarDB Cluster, which easily handles request routing between the services in Two-phase Commit Transactions.
+Please see [this document](https://github.com/scalar-labs/scalardb/blob/master/docs/two-phase-commit-transactions.md#request-routing-in-two-phase-commit-transactions) for details about Request Routing in Two-phase Commit Transactions.
+
+### Schema
+
+[The schema](schema.sql) is as follows:
+
+```sql
+CREATE COORDINATOR TABLES IF NOT EXIST;
+
+CREATE NAMESPACE IF NOT EXISTS customer_service;
+
+CREATE TABLE IF NOT EXISTS customer_service.customers (
+  customer_id INT PRIMARY KEY,
+  name TEXT,
+  credit_limit INT,
+  credit_total INT
+);
+
+CREATE NAMESPACE IF NOT EXISTS order_service;
+
+CREATE TABLE IF NOT EXISTS order_service.orders (
+  customer_id INT,
+  timestamp BIGINT,
+  order_id TEXT,
+  PRIMARY KEY (customer_id, timestamp)
+);
+
+CREATE INDEX IF NOT EXISTS ON order_service.orders (order_id);
+
+CREATE TABLE IF NOT EXISTS order_service.statements (
+  order_id TEXT,
+  item_id INT,
+  count INT,
+  PRIMARY KEY (order_id, item_id)
+);
+
+CREATE TABLE IF NOT EXISTS order_service.items (
+  item_id INT PRIMARY KEY,
+  name TEXT,
+  price INT
+);
+```
+
+All the tables are created in the `customer_service` and `order_service` namespaces. 
+ +- `customer_service.customers`: a table that manages customers' information + - `credit_limit`: the maximum amount of money a lender will allow each customer to spend when using a credit card + - `credit_total`: the amount of money that each customer has already spent by using the credit card +- `order_service.orders`: a table that manages order information +- `order_service.statements`: a table that manages order statement information +- `order_service.items`: a table that manages information of items to be ordered + +The Entity Relationship Diagram for the schema is as follows: + +![ERD](images/ERD.png) + +### Service endpoints + +The endpoints defined in the services are as follows: + +- Customer Service + - `getCustomerInfo` + - `payment` + - `prepare` + - `validate` + - `commit` + - `rollback` + - `repayment` + +- Order Service + - `placeOrder` + - `getOrder` + - `getOrders` + +### What you can do in this sample application + +The sample application supports the following types of transactions: + +- Get customer information through the `getCustomerInfo` endpoint of the Customer Service. +- Place an order by using a line of credit through the `placeOrder` endpoint of the Order Service and the `payment`, `prepare`, `validate`, `commit`, and `rollback` endpoints of the Customer Service. + - Checks if the cost of the order is below the customer's credit limit. + - If the check passes, records the order history and updates the amount the customer has spent. +- Get order information by order ID through the `getOrder` endpoint of the Order Service and the `getCustomerInfo`, `prepare`, `validate`, `commit`, and `rollback` endpoints of the Customer Service. +- Get order information by customer ID through the `getOrders` endpoint of the Order Service and the `getCustomerInfo`, `prepare`, `validate`, `commit`, and `rollback` endpoints of the Customer Service. +- Make a payment through the `repayment` endpoint of the Customer Service. + - Reduces the amount the customer has spent. + +{% capture notice--info %} +**Note** + +The `getCustomerInfo` endpoint works as a participant service endpoint when receiving a transaction ID from the coordinator. + +{% endcapture %} + +
<div class="notice--info">{{ notice--info | markdownify }}</div>
+ +## Configuration + +[The configuration for the Customer Service](customer-service/src/main/resources/application.properties) is as follows: + +```application.properties +spring.datasource.driver-class-name=com.scalar.db.sql.jdbc.SqlJdbcDriver +spring.datasource.url=jdbc:scalardb:\ +?scalar.db.sql.connection_mode=direct\ +&scalar.db.storage=multi-storage\ +&scalar.db.multi_storage.storages=cassandra,mysql\ +&scalar.db.multi_storage.storages.cassandra.storage=cassandra\ +&scalar.db.multi_storage.storages.cassandra.contact_points=cassandra\ +&scalar.db.multi_storage.storages.cassandra.username=cassandra\ +&scalar.db.multi_storage.storages.cassandra.password=cassandra\ +&scalar.db.multi_storage.storages.mysql.storage=jdbc\ +&scalar.db.multi_storage.storages.mysql.contact_points=jdbc:mysql://mysql:3306/\ +&scalar.db.multi_storage.storages.mysql.username=root\ +&scalar.db.multi_storage.storages.mysql.password=mysql\ +&scalar.db.multi_storage.namespace_mapping=customer_service:mysql,order_service:cassandra,coordinator:cassandra\ +&scalar.db.multi_storage.default_storage=mysql\ +&scalar.db.sql.default_transaction_mode=two_phase_commit_transaction\ +&scalar.db.consensus_commit.isolation_level=SERIALIZABLE +``` + +- `scalar.db.sql.connection_mode`: This configuration decides how to connect to ScalarDB. +- `scalar.db.storage`: Specifying `multi-storage` is necessary to use Multi-storage Transactions in ScalarDB. +- `scalar.db.multi_storage.storages`: Your storage names must be defined here. +- `scalar.db.multi_storage.storages.cassandra.*`: These configurations are for the `cassandra` storage, which is one of the storage names defined in `scalar.db.multi_storage.storages`. You can configure all the `scalar.db.*` properties for the `cassandra` storage here. +- `scalar.db.multi_storage.storages.mysql.*`: These configurations are for the `mysql` storage, which is one of the storage names defined in `scalar.db.multi_storage.storages`. You can configure all the `scalar.db.*` properties for the `mysql` storage here. +- `scalar.db.multi_storage.namespace_mapping`: This configuration maps the namespaces to the storage. In this sample application, operations for `customer_service` namespace tables are mapped to the `mysql` storage and operations for `order_service` namespace tables are mapped to the `cassandra` storage. You can also define which storage is mapped for the `coordinator` namespace that is used in Consensus Commit transactions. +- `scalar.db.multi_storage.default_storage`: This configuration sets the default storage that is used for operations on unmapped namespace tables. +- `scalar.db.sql.default_transaction_mode`: Specifying `two_phase_commit_transaction` is necessary to use Two-Phase Commit Transactions mode in ScalarDB. +- `scalar.db.consensus_commit.isolation_level`: This configuration decides the isolation level used for ConsensusCommit. + +For details, please see [Configuration - Multi-storage Transactions](https://github.com/scalar-labs/scalardb/blob/master/docs/multi-storage-transactions.md#configuration). 
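+
+As a reference for how this configuration is used from the application code, the Customer Service accesses the `customer_service.customers` table through Spring Data JDBC for ScalarDB. The following is a minimal sketch of what the corresponding entity class might look like. The class shape, field names, and the `withCreditTotal()` helper are assumptions inferred from the `CustomerService.java` snippets later in this README; the annotations are the standard Spring Data relational mapping annotations, which map the camelCase fields to the snake_case columns defined in `schema.sql` by default.
+
+```java
+import org.springframework.data.annotation.Id;
+import org.springframework.data.relational.core.mapping.Table;
+
+// Sketch only: an immutable entity mapped to the customer_service.customers table.
+@Table("customers")
+public class Customer {
+
+  @Id
+  public final int customerId;
+  public final String name;
+  public final int creditLimit;
+  public final int creditTotal;
+
+  public Customer(int customerId, String name, int creditLimit, int creditTotal) {
+    this.customerId = customerId;
+    this.name = name;
+    this.creditLimit = creditLimit;
+    this.creditTotal = creditTotal;
+  }
+
+  // Returns a copy with an updated credit_total, as used in the payment endpoint snippet later in this README.
+  public Customer withCreditTotal(int creditTotal) {
+    return new Customer(customerId, name, creditLimit, creditTotal);
+  }
+}
+```
+
+A repository interface extending `ScalarDbTwoPcRepository`, which is referenced in the two-phase commit walkthrough later in this README, would then expose `findById()`, `insert()`, `update()`, and the prepare/validate/commit/rollback operations for this entity.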
+ +[The configuration for the Order Service](order-service/src/main/resources/application.properties) is as follows: + +```application.properties +spring.datasource.driver-class-name=com.scalar.db.sql.jdbc.SqlJdbcDriver +spring.datasource.url=jdbc:scalardb:\ +?scalar.db.sql.connection_mode=direct\ +&scalar.db.storage=cassandra\ +&scalar.db.contact_points=cassandra\ +&scalar.db.username=cassandra\ +&scalar.db.password=cassandra\ +&scalar.db.sql.default_namespace_name=order_service\ +&scalar.db.sql.default_transaction_mode=two_phase_commit_transaction\ +&scalar.db.consensus_commit.isolation_level=SERIALIZABLE +``` + +- `scalar.db.storage`: `cassandra` is specified since this servcise uses only Cassandra as an underlying database. +- `scalar.db.contact_points`: This configuration specifies the contact points (e.g., host) for connecting to Cassandra. +- `scalar.db.username`: This configuration specifies the username for connecting to Cassandra. +- `scalar.db.password`: This configuration specifies the password for connecting to Cassandra. +- `scalar.db.sql.default_namespace_name`: This configuration sets the default namespace to `order_service`, eliminating the need for the application to specify namespaces. +- `scalar.db.sql.default_transaction_mode`: Specifying `two_phase_commit_transaction` is necessary to use Two-Phase Commit Transactions mode in ScalarDB. +- `scalar.db.consensus_commit.isolation_level`: This configuration decides the isolation level used for ConsensusCommit. + +## Setup + +### Start Cassandra and MySQL + +To start Cassandra and MySQL, you need to run the following `docker-compose` command: + +```shell +$ docker-compose up -d cassandra mysql +``` + +Please note that you need to wait around more than one minute for the containers to be fully started. + +### Load schema + +You then need to apply the schema with the following command. +To download the CLI tool, `scalardb-sql-cli--all.jar`, see the [Releases](https://github.com/scalar-labs/scalardb-sql/releases) of ScalarDB SQL and download the version that you want to use. + +```shell +$ java -jar scalardb-sql-cli--all.jar --config scalardb-sql.properties --file schema.sql +``` + +### Start Microservices + +First, you need to build the docker images of the sample application with the following command: + +```shell +$ ./gradlew docker +``` + +Then, you can start the microservices with the following `docker-compose` command: + +```shell +$ docker-compose up -d customer-service order-service +``` + +### Initial data + +When the microservices start, the initial data is loaded automatically. + +After the initial data has loaded, the following records should be stored in the tables: + +- For the `customer_service.customers` table: + +| customer_id | name | credit_limit | credit_total | +|-------------|---------------|--------------|--------------| +| 1 | Yamada Taro | 10000 | 0 | +| 2 | Yamada Hanako | 10000 | 0 | +| 3 | Suzuki Ichiro | 10000 | 0 | + +- For the `order_service.items` table: + +| item_id | name | price | +|---------|--------|-------| +| 1 | Apple | 1000 | +| 2 | Orange | 2000 | +| 3 | Grape | 2500 | +| 4 | Mango | 5000 | +| 5 | Melon | 3000 | + +## Run the sample application + +Let's start with getting information about the customer whose ID is `1`: + +```shell +$ ./gradlew :client:run --args="GetCustomerInfo 1" +... +{"id": 1,"name": "Yamada Taro","credit_limit": 10000} +... +``` + +At this time, `credit_total` isn't shown, which means the current value of `credit_total` is `0`. 
+ +Then, place an order for three apples and two oranges by using customer ID `1`. +Note that the order format is `:,:,...`: + +```shell +$ ./gradlew :client:run --args="PlaceOrder 1 1:3,2:2" +... +{"order_id": "415a453b-cfee-4c48-b8f6-d103d3e10bdb"} +... +``` + +You can see that running this command shows the order ID. + +Let's check the details of the order by using the order ID: + +```shell +$ ./gradlew :client:run --args="GetOrder 415a453b-cfee-4c48-b8f6-d103d3e10bdb" +... +{"order": {"order_id": "415a453b-cfee-4c48-b8f6-d103d3e10bdb","timestamp": 1686555272435,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 1,"item_name": "Apple","price": 1000,"count": $ +,"total": 3000},{"item_id": 2,"item_name": "Orange","price": 2000,"count": 2,"total": 4000}],"total": 7000}} +... +``` + +Then, let's place another order and get the order history of customer ID `1`: + +```shell +$ ./gradlew :client:run --args="PlaceOrder 1 5:1" +... +{"order_id": "069be075-98f7-428c-b2e0-6820693fc41b"} +... +$ ./gradlew :client:run --args="GetOrders 1" +... +{"order": [{"order_id": "069be075-98f7-428c-b2e0-6820693fc41b","timestamp": 1686555279366,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 5,"item_name": "Melon","price": 3000,"count": 1,"total": 3000}],"total": 3000},{"order_id": "415a453b-cfee-4c48-b8f6-d103d3e10bdb","timestamp": 1686555272435,"customer_id": 1,"customer_name": "Yamada Taro","statement": [{"item_id": 1,"item_name": "Apple","price": 1000,"count": 3,"total": 3000},{"item_id": 2,"item_name": "Orange","price": 2000,"count": 2,"total": 4000}],"total": 7000}]} +... +``` + +This order history is shown in descending order by timestamp. + +The customer's current `credit_total` is `10000`. +Since the customer has now reached their `credit_limit`, which was shown when retrieving their information, they cannot place anymore orders. + +```shell +$ ./gradlew :client:run --args="GetCustomerInfo 1" +... +{"id": 1,"name": "Yamada Taro","credit_limit": 10000,"credit_total": 10000} +... +$ ./gradlew :client:run --args="PlaceOrder 1 3:1,4:1" +... +io.grpc.StatusRuntimeException: FAILED_PRECONDITION: Credit limit exceeded. creditTotal:10000, payment:7500 + at io.grpc.stub.ClientCalls.toStatusRuntimeException(ClientCalls.java:271) + at io.grpc.stub.ClientCalls.getUnchecked(ClientCalls.java:252) + at io.grpc.stub.ClientCalls.blockingUnaryCall(ClientCalls.java:165) + at sample.rpc.OrderServiceGrpc$OrderServiceBlockingStub.placeOrder(OrderServiceGrpc.java:296) + at sample.client.command.PlaceOrderCommand.call(PlaceOrderCommand.java:38) + at sample.client.command.PlaceOrderCommand.call(PlaceOrderCommand.java:12) + at picocli.CommandLine.executeUserObject(CommandLine.java:2041) + at picocli.CommandLine.access$1500(CommandLine.java:148) + at picocli.CommandLine$RunLast.executeUserObjectOfLastSubcommandWithSameParent(CommandLine.java:2461) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2453) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2415) + at picocli.CommandLine$AbstractParseResultHandler.execute(CommandLine.java:2273) + at picocli.CommandLine$RunLast.execute(CommandLine.java:2417) + at picocli.CommandLine.execute(CommandLine.java:2170) + at sample.client.Client.main(Client.java:39) +... +``` + +After making a payment, the customer will be able to place orders again. + +```shell +$ ./gradlew :client:run --args="Repayment 1 8000" +... +$ ./gradlew :client:run --args="GetCustomerInfo 1" +... 
+{"id": 1,"name": "Yamada Taro","credit_limit": 10000,"credit_total": 2000} +... +$ ./gradlew :client:run --args="PlaceOrder 1 3:1,4:1" +... +{"order_id": "b6adabd8-0a05-4109-9618-3420fea3161f"} +... +``` + +## Clean up + +To stop Cassandra, MySQL and the Microservices, run the following command: + +```shell +$ docker-compose down +``` + +## Reference - How the microservice transaction is achieved + +The transactions for placing an order, getting a single order, and getting the history of orders achieve the microservice transaction. This section focuses on how the transactions that span the Customer Service and the Order Service are implemented by placing an order as an example. + +The following sequence diagram shows the transaction for placing an order: + +![Sequence Diagram](images/sequence_diagram.png) + +### 1. Transaction with a two-phase commit interface is started + +When a client sends a request to place an order to the Order Service, `OrderService.placeOrder()` is called, and the microservice transaction starts. + +At first, the Order Service starts a transaction with a two-phase commit interface with `ScalarDbTwoPcRepository.executeTwoPcTransaction()` as follows. For reference, see [`OrderService.java`](order-service/src/main/java/sample/order/OrderService.java). + +```java +// Start a two-phase commit interface transaction +TwoPcResult result = orderRepository.executeTwoPcTransaction(txId -> { + ... +}, ...); +``` + +The actions in [CRUD operations are executed](#2-crud-operations-are-executed), [Transaction is committed by using the two-phase commit protocol](#3-transaction-is-committed-by-using-the-two-phase-commit-protocol), and [Error handling](#error-handling) are automatically performed by the API. + +### 2. CRUD operations are executed + +After the transaction with a two-phase commit interface starts, CRUD operations are executed by `ScalarDbTwoPcRepository.executeTwoPcTransaction()`. The Order Service puts the order information in the `order_service.orders` table and the detailed information in the `order_service.statements` table as follows. For reference, see [`OrderService.java`](order-service/src/main/java/sample/order/OrderService.java). + +```java +// Put the order info into the `orders` table +orderRepository.insert(order); + +AtomicInteger amount = new AtomicInteger(); +for (ItemOrder itemOrder : request.getItemOrderList()) { + int itemId = itemOrder.getItemId(); + int count = itemOrder.getCount(); + // Retrieve the item info from the `items` table + Optional itemOpt = itemRepository.findById(itemId); + if (!itemOpt.isPresent()) { + String message = "Item not found: " + itemId; + responseObserver.onError( + Status.NOT_FOUND.withDescription(message).asRuntimeException()); + throw new ScalarDbNonTransientException(message); + } + Item item = itemOpt.get(); + + int cost = item.price * count; + // Put the order statement into the `statements` table + statementRepository.insert(new Statement(itemId, orderId, count)); + // Calculate the total amount + amount.addAndGet(cost); +} +``` + +Then, the Order Service calls the `payment` gRPC endpoint of the Customer Service along with the transaction ID. For reference, see [`OrderService.java`](order-service/src/main/java/sample/order/OrderService.java). 
+ +```java +customerServiceStub.payment( + PaymentRequest.newBuilder() + .setTransactionId(transactionId) + .setCustomerId(customerId) + .setAmount(amount) + .build()); +``` + +The `payment` endpoint of the Customer Service first joins the transaction with `ScalarDbTwoPcRepository.joinTransactionOnParticipant()` as follows. For reference, see [`CustomerService.java`](customer-service/src/main/java/sample/customer/CustomerService.java). + +```java +customerRepository.joinTransactionOnParticipant(request.getTransactionId(), ...); +``` + +The endpoint then gets the customer information and checks if the customer's credit total exceeds the credit limit after the payment. If the credit total does not exceed the credit limit, the endpoint updates the customer's credit total. For reference, see [`CustomerService.java`](customer-service/src/main/java/sample/customer/CustomerService.java). + +```java +Customer customer = getCustomer(responseObserver, request.getCustomerId()); + +int updatedCreditTotal = customer.creditTotal + request.getAmount(); +// Check if the credit total exceeds the credit limit after payment +if (updatedCreditTotal > customer.creditLimit) { + String message = String.format( + "Credit limit exceeded. creditTotal:%d, payment:%d", customer.creditTotal, request.getAmount()); + responseObserver.onError( + Status.FAILED_PRECONDITION.withDescription(message).asRuntimeException()); + throw new ScalarDbNonTransientException(message); +} + +// Reduce `credit_total` for the customer +customerRepository.update(customer.withCreditTotal(updatedCreditTotal)); +``` + +### 3. Transaction is committed by using the two-phase commit protocol + +After the Order Service receives the update that the payment succeeded, the Order Service tries to commit the transaction. + +The `ScalarDbTwoPcRepository.executeTwoPcTransaction()` API, which called on the Order Service, automatically performs preparations, validations, and commits of both the local Order Service and the remote Customer Service. These steps are executed sequentially after the above CRUD operations successfully finish. The implementations to invoke `prepare`, `validate`, and `commit` gRPC endpoints of the Customer Service need to be passed as parameters to the API. For reference, see [`OrderService.java`](order-service/src/main/java/sample/order/OrderService.java). + +```java +TwoPcResult result = orderRepository.executeTwoPcTransaction(txId ->{ + ... + }, + + Collections.singletonList( + RemotePrepareCommitPhaseOperations.createSerializable( + this::callPrepareEndpoint, + this::callValidateEndpoint, + this::callCommitEndpoint, + this::callRollbackEndpoint + ) + ) +); +``` + +![Sequence Diagram of High Level 2PC API](images/seq-diagram-high-level-2pc-api.png) + +In the `prepare` endpoint of the Customer Service, the endpoint resumes and prepares the transaction by using `ScalarDbTwoPcRepository.prepareTransactionOnParticipant()`. For reference, see [`CustomerService.java`](customer-service/src/main/java/sample/customer/CustomerService.java). + +```java +customerRepository.prepareTransactionOnParticipant(request.getTransactionId()); +``` + +In the `validate` endpoint of the Customer Service, the endpoint resumes and validates the transaction by using `ScalarDbTwoPcRepository.validateTransactionOnParticipant()`. For reference, see [`CustomerService.java`](customer-service/src/main/java/sample/customer/CustomerService.java). 
+ +```java +customerRepository.validateTransactionOnParticipant(request.getTransactionId()); +``` + +In the `commit` endpoint of the Customer Service, the endpoint resumes and commits the transaction by using `ScalarDbTwoPcRepository.commitTransactionOnParticipant()`. For reference, see [`CustomerService.java`](customer-service/src/main/java/sample/customer/CustomerService.java). + +```java +customerRepository.commitTransactionOnParticipant(request.getTransactionId()); +``` + +### Error handling + +If an error happens while executing a transaction, `ScalarDbTwoPcRepository.executeTwoPcTransaction()` will automatically roll back the transaction in both the local Order Service and the remote Customer Service. The implementation to invoke the `rollback` gRPC endpoint of the Customer Service also needs to be passed as a parameter to the API with other ones. For reference, see [`OrderService.java`](order-service/src/main/java/sample/order/OrderService.java). + +```java +TwoPcResult result = orderRepository.executeTwoPcTransaction(txId ->{ + ... + }, + + Collections.singletonList( + RemotePrepareCommitPhaseOperations.createSerializable( + this::callPrepareEndpoint, + this::callValidateEndpoint, + this::callCommitEndpoint, + this::callRollbackEndpoint + ) + ) +); +``` + +In the `rollback` endpoint of the Customer Service, the endpoint resumes and rolls back the transaction. For reference, see [`CustomerService.java`](customer-service/src/main/java/sample/customer/CustomerService.java). + +```java +customerRepository.rollbackTransactionOnParticipant(request.getTransactionId()); +``` + +For details about how to handle exceptions in ScalarDB, see [Handle exceptions](https://github.com/scalar-labs/scalardb/blob/master/docs/two-phase-commit-transactions.md#handle-exceptions). 
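+
+On the client side, a rolled-back transaction surfaces as a gRPC error, such as the `FAILED_PRECONDITION: Credit limit exceeded` response shown in the walkthrough above. The following is a minimal, hypothetical sketch of how a caller could separate such business errors from failures that may be worth retrying. The `OrderServiceGrpc`, `PlaceOrderRequest`, `PlaceOrderResponse`, and `ItemOrder` types are this sample's generated `sample.rpc` classes; the builder method names follow the usual protobuf conventions for the fields that appear in this README, and the host, port, and retry decision are assumptions (the actual client in this sample builds its channels through its own `Utils` helper instead).
+
+```java
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+import io.grpc.Status;
+import io.grpc.StatusRuntimeException;
+import sample.rpc.ItemOrder;
+import sample.rpc.OrderServiceGrpc;
+import sample.rpc.PlaceOrderRequest;
+import sample.rpc.PlaceOrderResponse;
+
+public class PlaceOrderExample {
+
+  public static void main(String[] args) {
+    // Assumed address of the Order Service; adjust to your environment.
+    ManagedChannel channel =
+        ManagedChannelBuilder.forAddress("localhost", 10020).usePlaintext().build();
+    try {
+      OrderServiceGrpc.OrderServiceBlockingStub stub = OrderServiceGrpc.newBlockingStub(channel);
+
+      // Order three apples (item ID 1) for customer 1, as in the walkthrough.
+      PlaceOrderRequest request =
+          PlaceOrderRequest.newBuilder()
+              .setCustomerId(1)
+              .addItemOrder(ItemOrder.newBuilder().setItemId(1).setCount(3).build())
+              .build();
+
+      PlaceOrderResponse response = stub.placeOrder(request);
+      System.out.println(response);
+    } catch (StatusRuntimeException e) {
+      if (e.getStatus().getCode() == Status.Code.FAILED_PRECONDITION) {
+        // A business-rule violation (for example, the credit limit was exceeded).
+        // The transaction has already been rolled back, so retrying will not help.
+        System.err.println("Order rejected: " + e.getStatus().getDescription());
+      } else {
+        // Other status codes (for example, UNAVAILABLE) may be transient and worth retrying.
+        throw e;
+      }
+    } finally {
+      channel.shutdown();
+    }
+  }
+}
+```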
diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/build.gradle b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/build.gradle new file mode 100644 index 00000000..0f448b4b --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/build.gradle @@ -0,0 +1,26 @@ +subprojects { + group = "sample" + project.version = '1.0' + + ext { + grpcVersion = '1.53.0' + protocVersion = '3.23.1' + scalarDbVersion = '3.9.1' + picoCliVersion = '4.7.1' + protobufJavaFormatVersion = '1.4' + slf4jVersion = '2.0.7' + springBootVersion = '2.7.12' + springRetryVersion = '1.3.4' + } + + repositories { + mavenCentral() + maven { + url = uri("https://maven.pkg.github.com/scalar-labs/scalardb-sql") + credentials { + username = project.findProperty("gpr.user") ?: System.getenv("USERNAME") + password = project.findProperty("gpr.key") ?: System.getenv("TOKEN") + } + } + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/Client.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/Client.class new file mode 100644 index 00000000..dbc3b5d2 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/Client.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/GetCustomerInfoCommand.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/GetCustomerInfoCommand.class new file mode 100644 index 00000000..ef1160d4 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/GetCustomerInfoCommand.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/GetOrderCommand.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/GetOrderCommand.class new file mode 100644 index 00000000..098395f3 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/GetOrderCommand.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/GetOrdersCommand.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/GetOrdersCommand.class new file mode 100644 index 00000000..3b85ef0f Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/GetOrdersCommand.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/PlaceOrderCommand.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/PlaceOrderCommand.class new file mode 100644 index 00000000..27876c40 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/PlaceOrderCommand.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/RepaymentCommand.class 
b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/RepaymentCommand.class new file mode 100644 index 00000000..986857b2 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/RepaymentCommand.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/Utils.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/Utils.class new file mode 100644 index 00000000..f76ccf8f Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/bin/main/sample/client/command/Utils.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/build.gradle b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/build.gradle new file mode 100644 index 00000000..d9e03945 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/build.gradle @@ -0,0 +1,19 @@ +plugins { + id 'java' + id 'application' +} + +dependencies { + implementation project(':rpc') + implementation "info.picocli:picocli:${picoCliVersion}" + implementation "com.googlecode.protobuf-java-format:protobuf-java-format:${protobufJavaFormatVersion}" +} + +application { + mainClassName = 'sample.client.Client' +} + +archivesBaseName = "sample-order-service" + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/Client.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/Client.java new file mode 100644 index 00000000..55afdd4b --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/Client.java @@ -0,0 +1,41 @@ +package sample.client; + +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Option; +import sample.client.command.GetCustomerInfoCommand; +import sample.client.command.GetOrderCommand; +import sample.client.command.GetOrdersCommand; +import sample.client.command.PlaceOrderCommand; +import sample.client.command.RepaymentCommand; + +@Command( + name = "bin/client", + description = "Sample application for Microservice Transaction", + subcommands = { + PlaceOrderCommand.class, + GetOrderCommand.class, + GetOrdersCommand.class, + GetCustomerInfoCommand.class, + RepaymentCommand.class + }) +public class Client implements Runnable { + + @Option( + names = {"-h", "--help"}, + usageHelp = true, + description = "Displays this help message and quits.", + defaultValue = "true") + private Boolean showHelp; + + public static void main(String[] args) { + new CommandLine(new Client()).execute(args); + } + + @Override + public void run() { + if (showHelp) { + CommandLine.usage(this, System.out); + } + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/GetCustomerInfoCommand.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/GetCustomerInfoCommand.java new file mode 100644 index 00000000..b8d03293 --- /dev/null +++ 
b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/GetCustomerInfoCommand.java @@ -0,0 +1,35 @@ +package sample.client.command; + +import io.grpc.ManagedChannel; +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.rpc.CustomerServiceGrpc; +import sample.rpc.GetCustomerInfoRequest; +import sample.rpc.GetCustomerInfoResponse; + +@Command(name = "GetCustomerInfo", description = "Get customer information") +public class GetCustomerInfoCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Override + public Integer call() { + ManagedChannel channel = Utils.getCustomerServiceChannel(); + try { + CustomerServiceGrpc.CustomerServiceBlockingStub stub = + CustomerServiceGrpc.newBlockingStub(channel); + GetCustomerInfoResponse response = + stub.getCustomerInfo( + GetCustomerInfoRequest.newBuilder().setCustomerId(customerId).build()); + Utils.printJsonString(response); + return 0; + } catch (Exception e) { + e.printStackTrace(); + return 1; + } finally { + Utils.shutdownChannel(channel); + } + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/GetOrderCommand.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/GetOrderCommand.java new file mode 100644 index 00000000..7af8ff1c --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/GetOrderCommand.java @@ -0,0 +1,33 @@ +package sample.client.command; + +import io.grpc.ManagedChannel; +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.rpc.GetOrderRequest; +import sample.rpc.GetOrderResponse; +import sample.rpc.OrderServiceGrpc; + +@Command(name = "GetOrder", description = "Get order information by order ID") +public class GetOrderCommand implements Callable { + + @Parameters(index = "0", paramLabel = "ORDER_ID", description = "order ID") + private String orderId; + + @Override + public Integer call() { + ManagedChannel channel = Utils.getOrderServiceChannel(); + try { + OrderServiceGrpc.OrderServiceBlockingStub stub = OrderServiceGrpc.newBlockingStub(channel); + GetOrderResponse response = + stub.getOrder(GetOrderRequest.newBuilder().setOrderId(orderId).build()); + Utils.printJsonString(response); + return 0; + } catch (Exception e) { + e.printStackTrace(); + return 1; + } finally { + Utils.shutdownChannel(channel); + } + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/GetOrdersCommand.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/GetOrdersCommand.java new file mode 100644 index 00000000..430518f8 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/GetOrdersCommand.java @@ -0,0 +1,33 @@ +package sample.client.command; + +import io.grpc.ManagedChannel; +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.rpc.GetOrdersRequest; +import sample.rpc.GetOrdersResponse; +import sample.rpc.OrderServiceGrpc; + 
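+// Sends a GetOrders request to the Order Service over gRPC and prints the returned orders as JSON.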
+@Command(name = "GetOrders", description = "Get order information by customer ID") +public class GetOrdersCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Override + public Integer call() { + ManagedChannel channel = Utils.getOrderServiceChannel(); + try { + OrderServiceGrpc.OrderServiceBlockingStub stub = OrderServiceGrpc.newBlockingStub(channel); + GetOrdersResponse response = + stub.getOrders(GetOrdersRequest.newBuilder().setCustomerId(customerId).build()); + Utils.printJsonString(response); + return 0; + } catch (Exception e) { + e.printStackTrace(); + return 1; + } finally { + Utils.shutdownChannel(channel); + } + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/PlaceOrderCommand.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/PlaceOrderCommand.java new file mode 100644 index 00000000..5c0c8223 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/PlaceOrderCommand.java @@ -0,0 +1,49 @@ +package sample.client.command; + +import io.grpc.ManagedChannel; +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.rpc.ItemOrder; +import sample.rpc.OrderServiceGrpc; +import sample.rpc.PlaceOrderRequest; +import sample.rpc.PlaceOrderResponse; + +@Command(name = "PlaceOrder", description = "Place an order") +public class PlaceOrderCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Parameters( + index = "1", + paramLabel = "ORDERS", + description = "orders. 
The format is \":,:,...\"") + private String orders; + + @Override + public Integer call() { + ManagedChannel channel = Utils.getOrderServiceChannel(); + try { + OrderServiceGrpc.OrderServiceBlockingStub stub = OrderServiceGrpc.newBlockingStub(channel); + + PlaceOrderRequest.Builder builder = PlaceOrderRequest.newBuilder().setCustomerId(customerId); + for (String order : orders.split(",", -1)) { + String[] s = order.split(":", -1); + int itemId = Integer.parseInt(s[0]); + int count = Integer.parseInt(s[1]); + builder.addItemOrder(ItemOrder.newBuilder().setItemId(itemId).setCount(count).build()); + } + + PlaceOrderResponse response = stub.placeOrder(builder.build()); + + Utils.printJsonString(response); + return 0; + } catch (Exception e) { + e.printStackTrace(); + return 1; + } finally { + Utils.shutdownChannel(channel); + } + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/RepaymentCommand.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/RepaymentCommand.java new file mode 100644 index 00000000..fa447abc --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/RepaymentCommand.java @@ -0,0 +1,38 @@ +package sample.client.command; + +import com.google.protobuf.Empty; +import io.grpc.ManagedChannel; +import java.util.concurrent.Callable; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.rpc.CustomerServiceGrpc; +import sample.rpc.RepaymentRequest; + +@Command(name = "Repayment", description = "Repayment") +public class RepaymentCommand implements Callable { + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Parameters(index = "1", paramLabel = "AMOUNT", description = "amount of the money for repayment") + private int amount; + + @Override + public Integer call() { + ManagedChannel channel = Utils.getCustomerServiceChannel(); + try { + CustomerServiceGrpc.CustomerServiceBlockingStub stub = + CustomerServiceGrpc.newBlockingStub(channel); + Empty response = + stub.repayment( + RepaymentRequest.newBuilder().setCustomerId(customerId).setAmount(amount).build()); + Utils.printJsonString(response); + return 0; + } catch (Exception e) { + e.printStackTrace(); + return 1; + } finally { + Utils.shutdownChannel(channel); + } + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/Utils.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/Utils.java new file mode 100644 index 00000000..0d0cba28 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/client/src/main/java/sample/client/command/Utils.java @@ -0,0 +1,35 @@ +package sample.client.command; + +import com.google.protobuf.Message; +import com.googlecode.protobuf.format.JsonFormat; +import io.grpc.ManagedChannel; +import io.grpc.netty.NettyChannelBuilder; +import java.util.concurrent.TimeUnit; + +public final class Utils { + + private static final JsonFormat JSON_FORMAT = new JsonFormat(); + + private Utils() { + } + + public static ManagedChannel getCustomerServiceChannel() { + return NettyChannelBuilder.forAddress("localhost", 10010).usePlaintext().build(); + } + + public static ManagedChannel getOrderServiceChannel() { + return 
NettyChannelBuilder.forAddress("localhost", 10020).usePlaintext().build(); + } + + public static void shutdownChannel(ManagedChannel channel) { + try { + channel.shutdown().awaitTermination(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + System.err.println("failed to shutdown the channel"); + } + } + + public static void printJsonString(Message message) { + System.out.println(JSON_FORMAT.printToString(message)); + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/Dockerfile b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/Dockerfile new file mode 100644 index 00000000..d906df9e --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/Dockerfile @@ -0,0 +1,11 @@ +FROM openjdk:8 + +WORKDIR / + +ADD customer-service.tar . + +WORKDIR /customer-service + +ENTRYPOINT ["./bin/customer-service"] + +EXPOSE 10010 diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/bin/main/sample/customer/CustomerService.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/bin/main/sample/customer/CustomerService.class new file mode 100644 index 00000000..08caaaa1 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/bin/main/sample/customer/CustomerService.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/bin/main/sample/customer/CustomerServiceServer.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/bin/main/sample/customer/CustomerServiceServer.class new file mode 100644 index 00000000..cd21bed2 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/bin/main/sample/customer/CustomerServiceServer.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/bin/main/sample/customer/domain/model/Customer.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/bin/main/sample/customer/domain/model/Customer.class new file mode 100644 index 00000000..c80e1e2c Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/bin/main/sample/customer/domain/model/Customer.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/bin/main/sample/customer/domain/repository/CustomerRepository.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/bin/main/sample/customer/domain/repository/CustomerRepository.class new file mode 100644 index 00000000..14bd47f2 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/bin/main/sample/customer/domain/repository/CustomerRepository.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/build.gradle b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/build.gradle new file mode 100644 index 00000000..c916de2a --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/build.gradle @@ -0,0 +1,44 @@ +plugins { + id 'java' + id 'java-library-distribution' + id 
'application' + id 'com.palantir.docker' version '0.25.0' +} + +dependencies { + implementation project(':rpc') + + implementation "org.slf4j:slf4j-simple:${slf4jVersion}" + implementation "com.scalar-labs:scalardb-sql-spring-data:${scalarDbVersion}" + implementation "com.scalar-labs:scalardb-sql-direct-mode:${scalarDbVersion}" + implementation "info.picocli:picocli-spring-boot-starter:${picoCliVersion}" + implementation "org.springframework.boot:spring-boot-starter-aop:${springBootVersion}" + implementation "org.springframework.retry:spring-retry:${springRetryVersion}" +} + +application { + mainClassName = 'sample.customer.CustomerServiceServer' +} + +docker { + name "sample-customer-service:${project.version}" + files tasks.distTar.outputs, 'customer-service.properties' +} + +distTar { + archiveFileName = "${project.name}.tar" + duplicatesStrategy DuplicatesStrategy.EXCLUDE +} + +distZip { + duplicatesStrategy DuplicatesStrategy.EXCLUDE +} + +installDist { + duplicatesStrategy DuplicatesStrategy.EXCLUDE +} + +archivesBaseName = "sample-customer-service" + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/src/main/java/sample/customer/CustomerService.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/src/main/java/sample/customer/CustomerService.java new file mode 100644 index 00000000..f2eee422 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/src/main/java/sample/customer/CustomerService.java @@ -0,0 +1,229 @@ +package sample.customer; + +import com.google.protobuf.Empty; +import com.scalar.db.sql.springdata.exception.ScalarDbNonTransientException; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.stub.StreamObserver; +import java.io.Closeable; +import java.util.Optional; +import java.util.function.Supplier; +import javax.annotation.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.dao.TransientDataAccessException; +import org.springframework.retry.annotation.Backoff; +import org.springframework.retry.annotation.Retryable; +import org.springframework.stereotype.Service; +import sample.customer.domain.model.Customer; +import sample.customer.domain.repository.CustomerRepository; +import sample.rpc.CommitRequest; +import sample.rpc.CustomerServiceGrpc; +import sample.rpc.GetCustomerInfoRequest; +import sample.rpc.GetCustomerInfoResponse; +import sample.rpc.PaymentRequest; +import sample.rpc.PrepareRequest; +import sample.rpc.RepaymentRequest; +import sample.rpc.RollbackRequest; +import sample.rpc.ValidateRequest; + +@Service +public class CustomerService extends CustomerServiceGrpc.CustomerServiceImplBase implements + Closeable { + + private static final Logger logger = LoggerFactory.getLogger(CustomerService.class); + + @Autowired + private CustomerRepository customerRepository; + + public void init() { + // `customerRepository` is set up after the constructor of CustomerService is performed by + // Spring Framework. So, loading initial data should be executed outside the constructor. 
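+    // The initial data is inserted with insertIfNotExists, so re-running the service does not create duplicate customers.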
+ loadInitialData(); + } + + private void loadInitialData() { + customerRepository.executeOneshotOperations(() -> { + customerRepository.insertIfNotExists(new Customer(1, "Yamada Taro", 10000, 0)); + customerRepository.insertIfNotExists(new Customer(2, "Yamada Hanako", 10000, 0)); + customerRepository.insertIfNotExists(new Customer(3, "Suzuki Ichiro", 10000, 0)); + return null; + }); + } + + @Retryable( + include = TransientDataAccessException.class, + maxAttempts = 8, + backoff = @Backoff(delay = 1000, maxDelay = 8000, multiplier = 2)) + @Override + public void getCustomerInfo( + GetCustomerInfoRequest request, StreamObserver responseObserver) { + String funcName = "Getting customer info"; + // This function processing operations can be used in both normal transaction and two-phase + // interface transaction. + Supplier operations = () -> { + Customer customer = getCustomer(responseObserver, request.getCustomerId()); + + return GetCustomerInfoResponse.newBuilder() + .setId(customer.customerId) + .setName(customer.name) + .setCreditLimit(customer.creditLimit) + .setCreditTotal(customer.creditTotal) + .build(); + }; + + if (request.hasTransactionId()) { + execAndReturnResponse(funcName, + () -> customerRepository.joinTransactionOnParticipant(request.getTransactionId(), operations), + responseObserver); + } else { + execAndReturnResponse(funcName, + () -> customerRepository.executeOneshotOperations(operations), + responseObserver); + } + } + + @Retryable( + include = TransientDataAccessException.class, + maxAttempts = 8, + backoff = @Backoff(delay = 1000, maxDelay = 8000, multiplier = 2)) + @Override + public void repayment(RepaymentRequest request, StreamObserver responseObserver) { + execAndReturnResponse("Repayment", () -> + customerRepository.executeOneshotOperations(() -> { + Customer customer = getCustomer(responseObserver, request.getCustomerId()); + + int updatedCreditTotal = customer.creditTotal - request.getAmount(); + // Check if over repayment or not + if (updatedCreditTotal < 0) { + String message = String.format( + "Over repayment. creditTotal:%d, payment:%d", customer.creditTotal, + request.getAmount()); + responseObserver.onError( + Status.FAILED_PRECONDITION.withDescription(message).asRuntimeException()); + throw new ScalarDbNonTransientException(message); + } + + // Reduce credit_total for the customer + customerRepository.update(customer.withCreditTotal(updatedCreditTotal)); + + return Empty.getDefaultInstance(); + } + ), responseObserver); + } + + // @Retryable shouldn't be used here as this is used as a participant API and + // will be retried by the coordinator service if needed + @Override + public void payment(PaymentRequest request, StreamObserver responseObserver) { + execAndReturnResponse("Payment", () -> + customerRepository.joinTransactionOnParticipant(request.getTransactionId(), () -> { + Customer customer = getCustomer(responseObserver, request.getCustomerId()); + + int updatedCreditTotal = customer.creditTotal + request.getAmount(); + // Check if the credit total exceeds the credit limit after payment + if (updatedCreditTotal > customer.creditLimit) { + String message = String.format( + "Credit limit exceeded. 
creditTotal:%d, payment:%d", customer.creditTotal, + request.getAmount()); + responseObserver.onError( + Status.FAILED_PRECONDITION.withDescription(message).asRuntimeException()); + throw new ScalarDbNonTransientException(message); + } + + // Increase credit_total for the customer + customerRepository.update(customer.withCreditTotal(updatedCreditTotal)); + + return Empty.getDefaultInstance(); + }), responseObserver); + } + + // @Retryable shouldn't be put as this is used as a participant API and + // will be retried by the coordinator service if needed + @Override + public void prepare(PrepareRequest request, StreamObserver responseObserver) { + execAndReturnResponse("Prepare", () -> { + customerRepository.prepareTransactionOnParticipant(request.getTransactionId()); + return Empty.getDefaultInstance(); + }, responseObserver); + } + + // @Retryable shouldn't be put as this is used as a participant API and + // will be retried by the coordinator service if needed + @Override + public void validate(ValidateRequest request, StreamObserver responseObserver) { + execAndReturnResponse("Validate", () -> { + customerRepository.validateTransactionOnParticipant(request.getTransactionId()); + return Empty.getDefaultInstance(); + }, responseObserver); + } + + // @Retryable shouldn't be put as this is used as a participant API and + // will be retried by the coordinator service if needed + @Override + public void commit(CommitRequest request, StreamObserver responseObserver) { + execAndReturnResponse("Commit", () -> { + customerRepository.commitTransactionOnParticipant(request.getTransactionId()); + return Empty.getDefaultInstance(); + }, responseObserver); + } + + // @Retryable shouldn't be put as this is used as a participant API and + // will be retried by the coordinator service if needed + @Override + public void rollback(RollbackRequest request, StreamObserver responseObserver) { + execAndReturnResponse("Rollback", () -> { + customerRepository.rollbackTransactionOnParticipant(request.getTransactionId()); + return Empty.getDefaultInstance(); + }, responseObserver); + } + + private Customer getCustomer(StreamObserver responseObserver, int customerId) { + // Retrieve the customer info for the specified customer ID from the customers table. 
+ Optional<Customer> customerOpt = customerRepository.findById(customerId); + if (!customerOpt.isPresent()) { + String message = "Customer not found: " + customerId; + responseObserver.onError( + Status.NOT_FOUND.withDescription(message).asRuntimeException()); + throw new ScalarDbNonTransientException(message); + } + return customerOpt.get(); + } + + @Nullable + private StatusRuntimeException extractStatusRuntimeException(Throwable e) { + Throwable current = e; + while (current != null) { + if (current instanceof StatusRuntimeException) { + return (StatusRuntimeException) current; + } + current = current.getCause(); + } + return null; + } + + private <T> void execAndReturnResponse(String funcName, Supplier<T> operations, + StreamObserver<T> responseObserver) { + try { + T result = operations.get(); + + responseObserver.onNext(result); + responseObserver.onCompleted(); + } catch (Exception e) { + StatusRuntimeException sre = extractStatusRuntimeException(e); + if (sre != null) { + responseObserver.onError(e); + } else { + String message = funcName + " failed"; + logger.error(message, e); + responseObserver.onError( + Status.INTERNAL.withDescription(message).withCause(e).asRuntimeException()); + } + } + } + + @Override + public void close() { + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/src/main/java/sample/customer/CustomerServiceServer.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/src/main/java/sample/customer/CustomerServiceServer.java new file mode 100644 index 00000000..f24c5508 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/src/main/java/sample/customer/CustomerServiceServer.java @@ -0,0 +1,103 @@ +package sample.customer; + +import com.scalar.db.sql.springdata.EnableScalarDbRepositories; +import io.grpc.Server; +import io.grpc.ServerBuilder; +import java.util.concurrent.Callable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.CommandLineRunner; +import org.springframework.boot.ExitCodeGenerator; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.retry.annotation.EnableRetry; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.IFactory; + +@SpringBootApplication +@EnableScalarDbRepositories(transactionManagerRef = "scalarDbSuspendableTransactionManager") +@EnableRetry +@Command(name = "customer-service-server", description = "Starts Customer Service server.") +public class CustomerServiceServer implements Callable<Integer>, CommandLineRunner, + ExitCodeGenerator { + + private static final Logger logger = LoggerFactory.getLogger(CustomerServiceServer.class); + + private static final int PORT = 10010; + + @Autowired + private CustomerService service; + + private volatile Server server; + + private int exitCode; + + @Autowired + private IFactory factory; + + public static void main(String[] args) { + // Invoke this application via org.springframework.boot.CommandLineRunner.run + int exitCode = SpringApplication.exit(SpringApplication.run(CustomerServiceServer.class, args)); + System.exit(exitCode); + } + + @Override + public Integer call() throws Exception { + addShutdownHook(); + start(); + blockUntilShutdown(); + return 0; + } + + public void start() throws Exception { + service.init(); + server = 
ServerBuilder.forPort(PORT).addService(service).build().start(); + logger.info("Customer Service server started, listening on " + PORT); + } + + public void addShutdownHook() { + Runtime.getRuntime() + .addShutdownHook( + new Thread( + () -> { + logger.info("Signal received. Shutting down the server ..."); + shutdown(); + blockUntilShutdown(); + service.close(); + logger.info("The server shut down"); + })); + } + + private void blockUntilShutdown() { + if (server != null) { + try { + server.awaitTermination(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.warn("Unexpectedly received an interruption"); + } + } + } + + private void shutdown() { + if (server != null) { + try { + server.shutdown(); + } catch (Exception e) { + logger.warn("Shutdown() failed", e); + } + } + } + + @Override + public void run(String... args) { + exitCode = new CommandLine(this, factory).execute(args); + } + + @Override + public int getExitCode() { + return exitCode; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/src/main/java/sample/customer/domain/model/Customer.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/src/main/java/sample/customer/domain/model/Customer.java new file mode 100644 index 00000000..088e1112 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/src/main/java/sample/customer/domain/model/Customer.java @@ -0,0 +1,25 @@ +package sample.customer.domain.model; + +import org.springframework.data.annotation.Id; +import org.springframework.data.relational.core.mapping.Table; + +@Table(schema = "customer_service", value = "customers") +public class Customer { + + @Id + public final int customerId; + public final String name; + public final int creditLimit; + public final int creditTotal; + + public Customer(int customerId, String name, int creditLimit, int creditTotal) { + this.customerId = customerId; + this.name = name; + this.creditLimit = creditLimit; + this.creditTotal = creditTotal; + } + + public Customer withCreditTotal(int newCreditTotal) { + return new Customer(this.customerId, this.name, this.creditLimit, newCreditTotal); + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/src/main/java/sample/customer/domain/repository/CustomerRepository.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/src/main/java/sample/customer/domain/repository/CustomerRepository.java new file mode 100644 index 00000000..6250ee6c --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/src/main/java/sample/customer/domain/repository/CustomerRepository.java @@ -0,0 +1,17 @@ +package sample.customer.domain.repository; + +import com.scalar.db.sql.springdata.twopc.ScalarDbTwoPcRepository; +import org.springframework.stereotype.Repository; +import org.springframework.transaction.annotation.Transactional; +import sample.customer.domain.model.Customer; + +@Transactional +@Repository +public interface CustomerRepository extends ScalarDbTwoPcRepository { + + default void insertIfNotExists(Customer customer) { + if (!findById(customer.customerId).isPresent()) { + insert(customer); + } + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/src/main/resources/application.properties 
b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/src/main/resources/application.properties new file mode 100644 index 00000000..f558776e --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/customer-service/src/main/resources/application.properties @@ -0,0 +1,17 @@ +spring.datasource.driver-class-name=com.scalar.db.sql.jdbc.SqlJdbcDriver +spring.datasource.url=jdbc:scalardb:\ +?scalar.db.sql.connection_mode=direct\ +&scalar.db.storage=multi-storage\ +&scalar.db.multi_storage.storages=cassandra,mysql\ +&scalar.db.multi_storage.storages.cassandra.storage=cassandra\ +&scalar.db.multi_storage.storages.cassandra.contact_points=cassandra\ +&scalar.db.multi_storage.storages.cassandra.username=cassandra\ +&scalar.db.multi_storage.storages.cassandra.password=cassandra\ +&scalar.db.multi_storage.storages.mysql.storage=jdbc\ +&scalar.db.multi_storage.storages.mysql.contact_points=jdbc:mysql://mysql:3306/\ +&scalar.db.multi_storage.storages.mysql.username=root\ +&scalar.db.multi_storage.storages.mysql.password=mysql\ +&scalar.db.multi_storage.namespace_mapping=customer_service:mysql,order_service:cassandra,coordinator:cassandra\ +&scalar.db.multi_storage.default_storage=mysql\ +&scalar.db.sql.default_transaction_mode=two_phase_commit_transaction\ +&scalar.db.consensus_commit.isolation_level=SERIALIZABLE diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/database-cassandra.properties b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/database-cassandra.properties new file mode 100644 index 00000000..a44993ae --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/database-cassandra.properties @@ -0,0 +1,4 @@ +scalar.db.storage=cassandra +scalar.db.contact_points=localhost +scalar.db.username=cassandra +scalar.db.password=cassandra diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/database-mysql.properties b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/database-mysql.properties new file mode 100644 index 00000000..d1389915 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/database-mysql.properties @@ -0,0 +1,4 @@ +scalar.db.storage=jdbc +scalar.db.contact_points=jdbc:mysql://localhost:3306/ +scalar.db.username=root +scalar.db.password=mysql diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/docker-compose.yml b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/docker-compose.yml new file mode 100644 index 00000000..8bf561d6 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/docker-compose.yml @@ -0,0 +1,46 @@ +version: "3.5" +services: + mysql: + image: mysql:8.0 + environment: + MYSQL_ROOT_PASSWORD: mysql + container_name: "mysql-1" + ports: + - "3306:3306" + networks: + - sample-network + cassandra: + image: cassandra:3.11 + container_name: "cassandra-1" + ports: + - "9042:9042" + networks: + - sample-network + customer-service: + image: sample-customer-service:1.0 + container_name: "customer-service-1" + depends_on: + - mysql + - cassandra + entrypoint: /bin/bash + command: ./bin/customer-service + restart: "always" + ports: + - "10010:10010" + networks: + - sample-network + order-service: + image: sample-order-service:1.0 + container_name: "order-service-1" + depends_on: + - cassandra + entrypoint: /bin/bash + command: ./bin/order-service + 
restart: "always" + ports: + - "10020:10020" + networks: + - sample-network +networks: + sample-network: + name: sample-network diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/gradle/wrapper/gradle-wrapper.jar b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 00000000..7454180f Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/gradle/wrapper/gradle-wrapper.jar differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/gradle/wrapper/gradle-wrapper.properties b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000..69a97150 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-7.1-bin.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/gradlew b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/gradlew new file mode 100755 index 00000000..744e882e --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/gradlew @@ -0,0 +1,185 @@ +#!/usr/bin/env sh + +# +# Copyright 2015 the original author or authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MSYS* | MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. 
+if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? -ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin or MSYS, switch paths to Windows format before running java +if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=`expr $i + 1` + done + case $i in + 0) set -- ;; + 1) set -- "$args0" ;; + 2) set -- "$args0" "$args1" ;; + 3) set -- "$args0" "$args1" "$args2" ;; + 4) set -- "$args0" "$args1" "$args2" "$args3" ;; + 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=`save "$@"` + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +exec "$JAVACMD" "$@" diff --git 
a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/gradlew.bat b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/gradlew.bat new file mode 100644 index 00000000..107acd32 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/gradlew.bat @@ -0,0 +1,89 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/images/ERD.png b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/images/ERD.png new file mode 100644 index 00000000..c0468efa Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/images/ERD.png differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/images/overview.png b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/images/overview.png new file mode 100644 index 00000000..4340b4f5 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/images/overview.png differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/images/seq-diagram-high-level-2pc-api.png b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/images/seq-diagram-high-level-2pc-api.png new file mode 100644 index 00000000..724e52b5 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/images/seq-diagram-high-level-2pc-api.png differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/images/sequence_diagram.png b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/images/sequence_diagram.png new file mode 100644 index 00000000..0317b5f3 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/images/sequence_diagram.png differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/Dockerfile b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/Dockerfile new file mode 100644 index 00000000..b69b5b0d --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/Dockerfile @@ -0,0 +1,11 @@ +FROM openjdk:8 + +WORKDIR / + +ADD order-service.tar . 
+ +WORKDIR /order-service + +ENTRYPOINT ["./bin/sample-order-service"] + +EXPOSE 10020 diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/OrderService.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/OrderService.class new file mode 100644 index 00000000..1c0dafd8 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/OrderService.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/OrderServiceServer.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/OrderServiceServer.class new file mode 100644 index 00000000..7e9fa86f Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/OrderServiceServer.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/model/Item.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/model/Item.class new file mode 100644 index 00000000..fe384a1c Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/model/Item.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/model/Order.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/model/Order.class new file mode 100644 index 00000000..c47c90a1 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/model/Order.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/model/Statement.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/model/Statement.class new file mode 100644 index 00000000..ed52b9b9 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/model/Statement.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/repository/ItemRepository.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/repository/ItemRepository.class new file mode 100644 index 00000000..e74a7a8d Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/repository/ItemRepository.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/repository/OrderRepository.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/repository/OrderRepository.class new file mode 100644 index 00000000..bd05eccf Binary files /dev/null and 
b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/repository/OrderRepository.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/repository/StatementRepository.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/repository/StatementRepository.class new file mode 100644 index 00000000..17edf8d6 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/bin/main/sample/order/domain/repository/StatementRepository.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/build.gradle b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/build.gradle new file mode 100644 index 00000000..07247d74 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/build.gradle @@ -0,0 +1,44 @@ +plugins { + id 'java' + id 'java-library-distribution' + id 'application' + id 'com.palantir.docker' version '0.25.0' +} + +dependencies { + implementation project(':rpc') + + implementation "org.slf4j:slf4j-simple:${slf4jVersion}" + implementation "com.scalar-labs:scalardb-sql-spring-data:${scalarDbVersion}" + implementation "com.scalar-labs:scalardb-sql-direct-mode:${scalarDbVersion}" + implementation "info.picocli:picocli-spring-boot-starter:${picoCliVersion}" + implementation "org.springframework.boot:spring-boot-starter-aop:${springBootVersion}" + implementation "org.springframework.retry:spring-retry:${springRetryVersion}" +} + +application { + mainClassName = 'sample.order.OrderServiceServer' +} + +docker { + name "sample-order-service:${project.version}" + files tasks.distTar.outputs, 'order-service.properties' +} + +distTar { + archiveFileName = "${project.name}.tar" + duplicatesStrategy DuplicatesStrategy.EXCLUDE +} + +distZip { + duplicatesStrategy DuplicatesStrategy.EXCLUDE +} + +installDist { + duplicatesStrategy DuplicatesStrategy.EXCLUDE +} + +archivesBaseName = "sample-order-service" + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/OrderService.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/OrderService.java new file mode 100644 index 00000000..031002fb --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/OrderService.java @@ -0,0 +1,346 @@ +package sample.order; + +import com.scalar.db.sql.springdata.exception.ScalarDbNonTransientException; +import com.scalar.db.sql.springdata.twopc.RemotePrepareCommitPhaseOperations; +import com.scalar.db.sql.springdata.twopc.TwoPcResult; +import io.grpc.ManagedChannel; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.netty.NettyChannelBuilder; +import io.grpc.stub.StreamObserver; +import java.io.Closeable; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; +import javax.annotation.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import 
org.springframework.beans.factory.annotation.Autowired; +import org.springframework.dao.TransientDataAccessException; +import org.springframework.retry.annotation.Backoff; +import org.springframework.retry.annotation.Retryable; +import org.springframework.stereotype.Service; +import sample.order.domain.model.Item; +import sample.order.domain.model.Order; +import sample.order.domain.model.Statement; +import sample.order.domain.repository.ItemRepository; +import sample.order.domain.repository.OrderRepository; +import sample.order.domain.repository.StatementRepository; +import sample.rpc.CommitRequest; +import sample.rpc.CustomerServiceGrpc; +import sample.rpc.GetCustomerInfoRequest; +import sample.rpc.GetCustomerInfoResponse; +import sample.rpc.GetOrderRequest; +import sample.rpc.GetOrderResponse; +import sample.rpc.GetOrdersRequest; +import sample.rpc.GetOrdersResponse; +import sample.rpc.ItemOrder; +import sample.rpc.OrderServiceGrpc; +import sample.rpc.PaymentRequest; +import sample.rpc.PlaceOrderRequest; +import sample.rpc.PlaceOrderResponse; +import sample.rpc.PrepareRequest; +import sample.rpc.RollbackRequest; +import sample.rpc.ValidateRequest; + +@Service +public class OrderService extends OrderServiceGrpc.OrderServiceImplBase implements Closeable { + + private static final Logger logger = LoggerFactory.getLogger(OrderService.class); + + // For gRPC connection to Customer service + private final ManagedChannel channel; + private final CustomerServiceGrpc.CustomerServiceBlockingStub stub; + + @Autowired + private ItemRepository itemRepository; + @Autowired + private OrderRepository orderRepository; + @Autowired + private StatementRepository statementRepository; + + public OrderService() { + // Initialize the gRPC connection to Customer service + channel = NettyChannelBuilder.forAddress("customer-service", 10010).usePlaintext().build(); + stub = CustomerServiceGrpc.newBlockingStub(channel); + } + + public void init() { + // `itemRepository` is set up after the constructor of OrderService is performed by + // Spring Framework. So, loading initial data should be executed outside the constructor. + loadInitialData(); + } + + private void loadInitialData() { + orderRepository.executeOneshotOperations(() -> { + itemRepository.insertIfNotExists(new Item(1, "Apple", 1000)); + itemRepository.insertIfNotExists(new Item(2, "Orange", 2000)); + itemRepository.insertIfNotExists(new Item(3, "Grape", 2500)); + itemRepository.insertIfNotExists(new Item(4, "Mango", 5000)); + itemRepository.insertIfNotExists(new Item(5, "Melon", 3000)); + return null; + }); + } + + /** + * Place an order.
It's a transaction that spans OrderService and CustomerService + */ + @Retryable( + include = TransientDataAccessException.class, + maxAttempts = 8, + backoff = @Backoff(delay = 1000, maxDelay = 8000, multiplier = 2)) + @Override + public void placeOrder( + PlaceOrderRequest request, StreamObserver responseObserver) { + + execAndReturnResponse("Placing an order", () -> { + // Start a two-phase commit interface transaction + TwoPcResult result = orderRepository.executeTwoPcTransaction(txId -> { + String orderId = UUID.randomUUID().toString(); + Order order = new Order(orderId, request.getCustomerId(), System.currentTimeMillis()); + + // Put the order info into the orders table + orderRepository.insert(order); + + AtomicInteger amount = new AtomicInteger(); + for (ItemOrder itemOrder : request.getItemOrderList()) { + int itemId = itemOrder.getItemId(); + int count = itemOrder.getCount(); + // Retrieve the item info from the items table + Optional itemOpt = itemRepository.findById(itemId); + if (!itemOpt.isPresent()) { + String message = "Item not found: " + itemId; + responseObserver.onError( + Status.NOT_FOUND.withDescription(message).asRuntimeException()); + throw new ScalarDbNonTransientException(message); + } + Item item = itemOpt.get(); + + int cost = item.price * count; + // Put the order statement into the statements table + statementRepository.insert(new Statement(itemId, orderId, count)); + // Calculate the total amount + amount.addAndGet(cost); + } + + // Call the payment endpoint of Customer service + callPaymentEndpoint(txId, request.getCustomerId(), amount.get()); + + return PlaceOrderResponse.newBuilder().setOrderId(orderId).build(); + }, + Collections.singletonList( + RemotePrepareCommitPhaseOperations.createSerializable( + this::callPrepareEndpoint, + this::callValidateEndpoint, + this::callCommitEndpoint, + this::callRollbackEndpoint + ) + ) + ); + + return result.executionPhaseReturnValue(); + }, responseObserver); + } + + private void callPaymentEndpoint(String transactionId, int customerId, int amount) { + stub.payment( + PaymentRequest.newBuilder() + .setTransactionId(transactionId) + .setCustomerId(customerId) + .setAmount(amount) + .build()); + } + + private void callPrepareEndpoint(String transactionId) { + stub.prepare(PrepareRequest.newBuilder().setTransactionId(transactionId).build()); + } + + private void callValidateEndpoint(String transactionId) { + stub.validate(ValidateRequest.newBuilder().setTransactionId(transactionId).build()); + } + + private void callCommitEndpoint(String transactionId) { + stub.commit(CommitRequest.newBuilder().setTransactionId(transactionId).build()); + } + + private void callRollbackEndpoint(String transactionId) { + stub.rollback(RollbackRequest.newBuilder().setTransactionId(transactionId).build()); + } + + /** + * Get Order information by order ID + */ + @Retryable( + include = TransientDataAccessException.class, + maxAttempts = 8, + backoff = @Backoff(delay = 1000, maxDelay = 8000, multiplier = 2)) + @Override + public void getOrder(GetOrderRequest request, StreamObserver responseObserver) { + execAndReturnResponse("Getting an order", () -> { + // Start a two-phase commit interface transaction + TwoPcResult result = orderRepository.executeTwoPcTransaction(txId -> { + // Retrieve the order info for the specified order ID + Optional orderOpt = orderRepository.findById(request.getOrderId()); + if (!orderOpt.isPresent()) { + String message = "Order not found: " + request.getOrderId(); + responseObserver.onError( + 
Status.NOT_FOUND.withDescription(message).asRuntimeException()); + throw new ScalarDbNonTransientException(message); + } + + // Get the customer name from the Customer service + String customerName = getCustomerName(txId, orderOpt.get().customerId); + + // Make an order protobuf to return + sample.rpc.Order order = getOrderResult(txId, responseObserver, orderOpt.get(), customerName); + return GetOrderResponse.newBuilder().setOrder(order).build(); + }, + Collections.singletonList( + RemotePrepareCommitPhaseOperations.createSerializable( + this::callPrepareEndpoint, + this::callValidateEndpoint, + this::callCommitEndpoint, + this::callRollbackEndpoint + ) + )); + + return result.executionPhaseReturnValue(); + }, responseObserver); + } + + /** + * Get Order information by customer ID + */ + @Retryable( + include = TransientDataAccessException.class, + maxAttempts = 8, + backoff = @Backoff(delay = 1000, maxDelay = 8000, multiplier = 2)) + @Override + public void getOrders( + GetOrdersRequest request, StreamObserver<GetOrdersResponse> responseObserver) { + execAndReturnResponse("Getting orders", () -> { + // Start a two-phase commit interface transaction + TwoPcResult<GetOrdersResponse> result = orderRepository.executeTwoPcTransaction(txId -> { + // Get the customer name from the Customer service + String customerName = getCustomerName(txId, request.getCustomerId()); + + // Retrieve the order info for the specified order ID + GetOrdersResponse.Builder builder = GetOrdersResponse.newBuilder(); + for (Order order : orderRepository.findAllByCustomerIdOrderByTimestampDesc( + request.getCustomerId())) { + // Make an order protobuf to return + builder.addOrder(getOrderResult(txId, responseObserver, order, customerName)); + } + return builder.build(); + }, + Collections.singletonList( + RemotePrepareCommitPhaseOperations.createSerializable( + this::callPrepareEndpoint, + this::callValidateEndpoint, + this::callCommitEndpoint, + this::callRollbackEndpoint + ) + )); + + return result.executionPhaseReturnValue(); + }, responseObserver); + } + + private sample.rpc.Order getOrderResult(String transactionId, StreamObserver<?> responseObserver, + Order order, String customerName) { + sample.rpc.Order.Builder orderBuilder = + sample.rpc.Order.newBuilder() + .setOrderId(order.orderId) + .setCustomerId(order.customerId) + .setCustomerName(customerName) + .setTimestamp(order.timestamp); + + int total = 0; + + // Retrieve the order statements for the order ID from the statements table + List<Statement> statements = statementRepository.findAllByOrderId(order.orderId); + + // Make statements + for (Statement statement : statements) { + sample.rpc.Statement.Builder statementBuilder = sample.rpc.Statement.newBuilder(); + statementBuilder.setItemId(statement.itemId); + + // Retrieve the item data from the items table + Optional<Item> itemOpt = itemRepository.findById(statement.itemId); + if (!itemOpt.isPresent()) { + String message = "Item not found: " + statement.itemId; + responseObserver.onError( + Status.NOT_FOUND.withDescription(message).asRuntimeException()); + throw new ScalarDbNonTransientException(message); + } + Item item = itemOpt.get(); + statementBuilder.setItemName(item.name); + statementBuilder.setPrice(item.price); + statementBuilder.setCount(statement.count); + + int itemTotal = item.price * statement.count; + statementBuilder.setTotal(itemTotal); + + orderBuilder.addStatement(statementBuilder); + + total += itemTotal; + } + + return orderBuilder.setTotal(total).build(); + } + + private String getCustomerName(String transactionId, int customerId) { +
GetCustomerInfoResponse customerInfo = + stub.getCustomerInfo( + GetCustomerInfoRequest.newBuilder() + .setTransactionId(transactionId) + .setCustomerId(customerId).build()); + return customerInfo.getName(); + } + + @Nullable + private StatusRuntimeException extractStatusRuntimeException(Throwable e) { + Throwable current = e; + while (current != null) { + if (current instanceof StatusRuntimeException) { + return (StatusRuntimeException) current; + } + current = current.getCause(); + } + return null; + } + + private <T> void execAndReturnResponse(String funcName, Supplier<T> operations, + StreamObserver<T> responseObserver) { + try { + T result = operations.get(); + + responseObserver.onNext(result); + responseObserver.onCompleted(); + } catch (Exception e) { + StatusRuntimeException sre = extractStatusRuntimeException(e); + if (sre != null) { + // Return the extracted gRPC status so that the original status code is preserved + responseObserver.onError(sre); + } else { + String message = funcName + " failed"; + logger.error(message, e); + responseObserver.onError( + Status.INTERNAL.withDescription(message).withCause(e).asRuntimeException()); + } + } + } + + @Override + public void close() { + try { + channel.shutdown().awaitTermination(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.warn("Failed to shutdown the channel", e); + } + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/OrderServiceServer.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/OrderServiceServer.java new file mode 100644 index 00000000..60801394 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/OrderServiceServer.java @@ -0,0 +1,101 @@ +package sample.order; + +import com.scalar.db.sql.springdata.EnableScalarDbRepositories; +import io.grpc.Server; +import io.grpc.ServerBuilder; +import java.util.concurrent.Callable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.CommandLineRunner; +import org.springframework.boot.ExitCodeGenerator; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.retry.annotation.EnableRetry; +import picocli.CommandLine; +import picocli.CommandLine.Command; + +@SpringBootApplication +@EnableScalarDbRepositories(transactionManagerRef = "scalarDbSuspendableTransactionManager") +@EnableRetry +@Command(name = "order-service-server", description = "Starts Order Service server.") +public class OrderServiceServer implements Callable<Integer>, CommandLineRunner, ExitCodeGenerator { + + private static final Logger logger = LoggerFactory.getLogger(OrderServiceServer.class); + + private static final int PORT = 10020; + + @Autowired + private OrderService service; + + private volatile Server server; + + private int exitCode; + + @Autowired + private CommandLine.IFactory factory; + + public static void main(String[] args) { + // Invoke this application via org.springframework.boot.CommandLineRunner.run + int exitCode = SpringApplication.exit(SpringApplication.run(OrderServiceServer.class, args)); + System.exit(exitCode); + } + + @Override + public Integer call() throws Exception { + addShutdownHook(); + start(); + blockUntilShutdown(); + return 0; + } + + public void start() throws Exception { + service.init(); + server =
ServerBuilder.forPort(PORT).addService(service).build().start(); + logger.info("Order Service server started, listening on " + PORT); + } + + public void addShutdownHook() { + Runtime.getRuntime() + .addShutdownHook( + new Thread( + () -> { + logger.info("Signal received. Shutting down the server ..."); + shutdown(); + blockUntilShutdown(); + service.close(); + logger.info("The server shut down"); + })); + } + + private void blockUntilShutdown() { + if (server != null) { + try { + server.awaitTermination(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.warn("Unexpectedly received an interruption"); + } + } + } + + private void shutdown() { + if (server != null) { + try { + server.shutdown(); + } catch (Exception e) { + logger.warn("Shutdown() failed", e); + } + } + } + + @Override + public void run(String... args) { + exitCode = new CommandLine(this, factory).execute(args); + } + + @Override + public int getExitCode() { + return exitCode; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/model/Item.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/model/Item.java new file mode 100644 index 00000000..e8ac8f2a --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/model/Item.java @@ -0,0 +1,19 @@ +package sample.order.domain.model; + +import org.springframework.data.annotation.Id; +import org.springframework.data.relational.core.mapping.Table; + +@Table("items") +public class Item { + + @Id + public final int itemId; + public final String name; + public final int price; + + public Item(int itemId, String name, int price) { + this.itemId = itemId; + this.name = name; + this.price = price; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/model/Order.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/model/Order.java new file mode 100644 index 00000000..90178afe --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/model/Order.java @@ -0,0 +1,19 @@ +package sample.order.domain.model; + +import org.springframework.data.annotation.Id; +import org.springframework.data.relational.core.mapping.Table; + +@Table("orders") +public class Order { + + @Id + public final String orderId; + public final int customerId; + public final long timestamp; + + public Order(String orderId, int customerId, long timestamp) { + this.orderId = orderId; + this.customerId = customerId; + this.timestamp = timestamp; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/model/Statement.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/model/Statement.java new file mode 100644 index 00000000..dac619b7 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/model/Statement.java @@ -0,0 +1,21 @@ +package sample.order.domain.model; + +import org.springframework.data.annotation.Id; +import org.springframework.data.relational.core.mapping.Table; + 
+@Table("statements") +public class Statement { + + // This model is actually accessed via a multi-column index, but Spring Data doesn't support + // composite keys and always requires an @Id. So, this @Id annotation is only a dummy. + @Id + public final int itemId; + public final String orderId; + public final int count; + + public Statement(int itemId, String orderId, int count) { + this.itemId = itemId; + this.orderId = orderId; + this.count = count; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/repository/ItemRepository.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/repository/ItemRepository.java new file mode 100644 index 00000000..9c6c3608 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/repository/ItemRepository.java @@ -0,0 +1,17 @@ +package sample.order.domain.repository; + +import com.scalar.db.sql.springdata.twopc.ScalarDbTwoPcRepository; +import org.springframework.stereotype.Repository; +import org.springframework.transaction.annotation.Transactional; +import sample.order.domain.model.Item; + +@Transactional +@Repository +public interface ItemRepository extends ScalarDbTwoPcRepository<Item, Integer> { + + default void insertIfNotExists(Item item) { + if (!findById(item.itemId).isPresent()) { + insert(item); + } + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/repository/OrderRepository.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/repository/OrderRepository.java new file mode 100644 index 00000000..5b8c7dc6 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/repository/OrderRepository.java @@ -0,0 +1,14 @@ +package sample.order.domain.repository; + +import com.scalar.db.sql.springdata.twopc.ScalarDbTwoPcRepository; +import java.util.List; +import org.springframework.stereotype.Repository; +import org.springframework.transaction.annotation.Transactional; +import sample.order.domain.model.Order; + +@Transactional +@Repository +public interface OrderRepository extends ScalarDbTwoPcRepository<Order, String> { + + List<Order> findAllByCustomerIdOrderByTimestampDesc(int customerId); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/repository/StatementRepository.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/repository/StatementRepository.java new file mode 100644 index 00000000..75551f96 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/java/sample/order/domain/repository/StatementRepository.java @@ -0,0 +1,14 @@ +package sample.order.domain.repository; + +import com.scalar.db.sql.springdata.twopc.ScalarDbTwoPcRepository; +import java.util.List; +import org.springframework.stereotype.Repository; +import org.springframework.transaction.annotation.Transactional; +import sample.order.domain.model.Statement; + +@Transactional +@Repository +public interface StatementRepository extends ScalarDbTwoPcRepository<Statement, Integer> { + + List<Statement> findAllByOrderId(String orderId); +} diff --git
a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/resources/application.properties b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/resources/application.properties new file mode 100644 index 00000000..157a3741 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/order-service/src/main/resources/application.properties @@ -0,0 +1,10 @@ +spring.datasource.driver-class-name=com.scalar.db.sql.jdbc.SqlJdbcDriver +spring.datasource.url=jdbc:scalardb:\ +?scalar.db.sql.connection_mode=direct\ +&scalar.db.storage=cassandra\ +&scalar.db.contact_points=cassandra\ +&scalar.db.username=cassandra\ +&scalar.db.password=cassandra\ +&scalar.db.sql.default_namespace_name=order_service\ +&scalar.db.sql.default_transaction_mode=two_phase_commit_transaction\ +&scalar.db.consensus_commit.isolation_level=SERIALIZABLE diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest$1.class new file mode 100644 index 00000000..d855f97d Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest$Builder.class new file mode 100644 index 00000000..7f1a4d70 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest.class new file mode 100644 index 00000000..e1dab0fa Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequest.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequestOrBuilder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequestOrBuilder.class new file mode 100644 index 00000000..eb0a7908 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CommitRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$1.class new file mode 100644 index 00000000..ce669993 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$2.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$2.class new file mode 100644 index 00000000..760334ef Binary files 
/dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$2.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$3.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$3.class new file mode 100644 index 00000000..0596ed8a Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$3.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceBaseDescriptorSupplier.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceBaseDescriptorSupplier.class new file mode 100644 index 00000000..bc7b337d Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceBaseDescriptorSupplier.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceBlockingStub.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceBlockingStub.class new file mode 100644 index 00000000..97dfcec5 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceBlockingStub.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceFileDescriptorSupplier.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceFileDescriptorSupplier.class new file mode 100644 index 00000000..0973f8f5 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceFileDescriptorSupplier.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceFutureStub.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceFutureStub.class new file mode 100644 index 00000000..dd7bd77a Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceFutureStub.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceImplBase.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceImplBase.class new file mode 100644 index 00000000..2a488fcb Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceImplBase.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceMethodDescriptorSupplier.class 
b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceMethodDescriptorSupplier.class new file mode 100644 index 00000000..d6f08036 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceMethodDescriptorSupplier.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceStub.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceStub.class new file mode 100644 index 00000000..4bd9bd22 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$CustomerServiceStub.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$MethodHandlers.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$MethodHandlers.class new file mode 100644 index 00000000..9e8b09a9 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc$MethodHandlers.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc.class new file mode 100644 index 00000000..e4bdc4dd Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/CustomerServiceGrpc.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest$1.class new file mode 100644 index 00000000..8877fdb1 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest$Builder.class new file mode 100644 index 00000000..d9e4d6d1 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest.class new file mode 100644 index 00000000..3c3cce48 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequest.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequestOrBuilder.class 
b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequestOrBuilder.class new file mode 100644 index 00000000..f9298ab3 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse$1.class new file mode 100644 index 00000000..d72bdc86 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse$Builder.class new file mode 100644 index 00000000..4580edfb Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse$Builder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse.class new file mode 100644 index 00000000..8179e701 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponse.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponseOrBuilder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponseOrBuilder.class new file mode 100644 index 00000000..a23bc85d Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetCustomerInfoResponseOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest$1.class new file mode 100644 index 00000000..859905fa Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest$Builder.class new file mode 100644 index 00000000..2d1da8ef Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest.class new file mode 100644 index 00000000..77a76406 Binary files /dev/null and 
b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequest.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequestOrBuilder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequestOrBuilder.class new file mode 100644 index 00000000..6948966a Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse$1.class new file mode 100644 index 00000000..6bb76939 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse$Builder.class new file mode 100644 index 00000000..9259e51a Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse$Builder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse.class new file mode 100644 index 00000000..c341a4e7 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponse.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponseOrBuilder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponseOrBuilder.class new file mode 100644 index 00000000..1367fbcd Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrderResponseOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest$1.class new file mode 100644 index 00000000..f94ef23d Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest$Builder.class new file mode 100644 index 00000000..f4531b1b Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest.class 
b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest.class new file mode 100644 index 00000000..a60847f6 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequest.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequestOrBuilder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequestOrBuilder.class new file mode 100644 index 00000000..771201fc Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse$1.class new file mode 100644 index 00000000..0327d415 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse$Builder.class new file mode 100644 index 00000000..1df0ac9d Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse$Builder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse.class new file mode 100644 index 00000000..a808d4e7 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponse.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponseOrBuilder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponseOrBuilder.class new file mode 100644 index 00000000..a373f857 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/GetOrdersResponseOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder$1.class new file mode 100644 index 00000000..f9511e4f Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder$Builder.class new file mode 100644 index 00000000..b710ba05 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder$Builder.class 
differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder.class new file mode 100644 index 00000000..bd758ac5 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrderOrBuilder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrderOrBuilder.class new file mode 100644 index 00000000..6385b10f Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ItemOrderOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Order$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Order$1.class new file mode 100644 index 00000000..22c87fa0 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Order$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Order$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Order$Builder.class new file mode 100644 index 00000000..45c3d1f1 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Order$Builder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Order.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Order.class new file mode 100644 index 00000000..4fde3a9d Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Order.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderOrBuilder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderOrBuilder.class new file mode 100644 index 00000000..7b00a6c7 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$1.class new file mode 100644 index 00000000..8f1311fd Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$2.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$2.class new file mode 100644 index 00000000..c71da053 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$2.class differ diff --git 
a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$3.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$3.class new file mode 100644 index 00000000..4b027e4a Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$3.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$MethodHandlers.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$MethodHandlers.class new file mode 100644 index 00000000..2473766c Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$MethodHandlers.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceBaseDescriptorSupplier.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceBaseDescriptorSupplier.class new file mode 100644 index 00000000..4cfb4c9a Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceBaseDescriptorSupplier.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceBlockingStub.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceBlockingStub.class new file mode 100644 index 00000000..36703f05 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceBlockingStub.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceFileDescriptorSupplier.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceFileDescriptorSupplier.class new file mode 100644 index 00000000..040167ce Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceFileDescriptorSupplier.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceFutureStub.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceFutureStub.class new file mode 100644 index 00000000..6fa17db6 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceFutureStub.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceImplBase.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceImplBase.class new file mode 100644 index 00000000..3dfa4604 Binary files /dev/null and 
b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceImplBase.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceMethodDescriptorSupplier.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceMethodDescriptorSupplier.class new file mode 100644 index 00000000..63886a8d Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceMethodDescriptorSupplier.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceStub.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceStub.class new file mode 100644 index 00000000..39e33807 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc$OrderServiceStub.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc.class new file mode 100644 index 00000000..68229915 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/OrderServiceGrpc.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest$1.class new file mode 100644 index 00000000..9f549c43 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest$Builder.class new file mode 100644 index 00000000..4ea3b665 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest.class new file mode 100644 index 00000000..3b487d42 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequest.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequestOrBuilder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequestOrBuilder.class new file mode 100644 index 00000000..62c247c8 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PaymentRequestOrBuilder.class differ diff --git 
a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest$1.class new file mode 100644 index 00000000..f5583db8 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest$Builder.class new file mode 100644 index 00000000..380db594 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest.class new file mode 100644 index 00000000..c14d1f99 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequest.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequestOrBuilder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequestOrBuilder.class new file mode 100644 index 00000000..1c4210eb Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse$1.class new file mode 100644 index 00000000..589c8d2a Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse$Builder.class new file mode 100644 index 00000000..c044a174 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse$Builder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse.class new file mode 100644 index 00000000..dfae371e Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponse.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponseOrBuilder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponseOrBuilder.class new file mode 
100644 index 00000000..3c4df9de Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PlaceOrderResponseOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest$1.class new file mode 100644 index 00000000..7b13187c Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest$Builder.class new file mode 100644 index 00000000..75169c2e Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest.class new file mode 100644 index 00000000..404870b0 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequest.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequestOrBuilder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequestOrBuilder.class new file mode 100644 index 00000000..bc8e43ed Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/PrepareRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest$1.class new file mode 100644 index 00000000..8785bb15 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest$Builder.class new file mode 100644 index 00000000..d655e2dd Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest.class new file mode 100644 index 00000000..745aa208 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequest.class differ diff --git 
a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequestOrBuilder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequestOrBuilder.class new file mode 100644 index 00000000..9ec59308 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RepaymentRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest$1.class new file mode 100644 index 00000000..13845027 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest$Builder.class new file mode 100644 index 00000000..a4aa7c5c Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest.class new file mode 100644 index 00000000..3e89c92b Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequest.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequestOrBuilder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequestOrBuilder.class new file mode 100644 index 00000000..7e095b3e Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/RollbackRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Sample.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Sample.class new file mode 100644 index 00000000..d540e237 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Sample.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement$1.class new file mode 100644 index 00000000..afb8a608 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement$Builder.class new file mode 100644 index 00000000..ac03388a Binary files /dev/null and 
b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement$Builder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement.class new file mode 100644 index 00000000..6ae7ab37 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/Statement.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/StatementOrBuilder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/StatementOrBuilder.class new file mode 100644 index 00000000..d90d8041 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/StatementOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest$1.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest$1.class new file mode 100644 index 00000000..3d56dd89 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest$1.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest$Builder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest$Builder.class new file mode 100644 index 00000000..18fbfa59 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest$Builder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest.class new file mode 100644 index 00000000..8f51bc8f Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequest.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequestOrBuilder.class b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequestOrBuilder.class new file mode 100644 index 00000000..87e5cac3 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/bin/main/sample/rpc/ValidateRequestOrBuilder.class differ diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/build.gradle b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/build.gradle new file mode 100644 index 00000000..2b0a5c09 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/build.gradle @@ -0,0 +1,34 @@ +plugins { + id 'java' + id 'java-library-distribution' + id 'com.google.protobuf' version '0.9.1' +} + +dependencies { + api "io.grpc:grpc-netty:${grpcVersion}" + api "io.grpc:grpc-protobuf:${grpcVersion}" + api "io.grpc:grpc-stub:${grpcVersion}" + api "io.grpc:grpc-services:${grpcVersion}" +} + +protobuf { + protoc { 
artifact = "com.google.protobuf:protoc:${protocVersion}" } + plugins { + grpc { artifact = "io.grpc:protoc-gen-grpc-java:${grpcVersion}" } + } + generateProtoTasks { + all()*.plugins { grpc {} } + } + generatedFilesBaseDir = "$projectDir/src" +} + +archivesBaseName = "sample-rpc" + +// The processResources task needs to depend on the generateProto task because it uses the output +// of the the generateProto task +processResources { + dependsOn generateProto +} + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/grpc/sample/rpc/CustomerServiceGrpc.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/grpc/sample/rpc/CustomerServiceGrpc.java new file mode 100644 index 00000000..fe124be8 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/grpc/sample/rpc/CustomerServiceGrpc.java @@ -0,0 +1,819 @@ +package sample.rpc; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + *
+ * for Customer Service
+ * 
+ */ +@javax.annotation.Generated( + value = "by gRPC proto compiler (version 1.53.0)", + comments = "Source: sample.proto") +@io.grpc.stub.annotations.GrpcGenerated +public final class CustomerServiceGrpc { + + private CustomerServiceGrpc() {} + + public static final String SERVICE_NAME = "rpc.CustomerService"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor getGetCustomerInfoMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetCustomerInfo", + requestType = sample.rpc.GetCustomerInfoRequest.class, + responseType = sample.rpc.GetCustomerInfoResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getGetCustomerInfoMethod() { + io.grpc.MethodDescriptor getGetCustomerInfoMethod; + if ((getGetCustomerInfoMethod = CustomerServiceGrpc.getGetCustomerInfoMethod) == null) { + synchronized (CustomerServiceGrpc.class) { + if ((getGetCustomerInfoMethod = CustomerServiceGrpc.getGetCustomerInfoMethod) == null) { + CustomerServiceGrpc.getGetCustomerInfoMethod = getGetCustomerInfoMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetCustomerInfo")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.GetCustomerInfoRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.GetCustomerInfoResponse.getDefaultInstance())) + .setSchemaDescriptor(new CustomerServiceMethodDescriptorSupplier("GetCustomerInfo")) + .build(); + } + } + } + return getGetCustomerInfoMethod; + } + + private static volatile io.grpc.MethodDescriptor getRepaymentMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "Repayment", + requestType = sample.rpc.RepaymentRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getRepaymentMethod() { + io.grpc.MethodDescriptor getRepaymentMethod; + if ((getRepaymentMethod = CustomerServiceGrpc.getRepaymentMethod) == null) { + synchronized (CustomerServiceGrpc.class) { + if ((getRepaymentMethod = CustomerServiceGrpc.getRepaymentMethod) == null) { + CustomerServiceGrpc.getRepaymentMethod = getRepaymentMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Repayment")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.RepaymentRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new CustomerServiceMethodDescriptorSupplier("Repayment")) + .build(); + } + } + } + return getRepaymentMethod; + } + + private static volatile io.grpc.MethodDescriptor getPaymentMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "Payment", + requestType = sample.rpc.PaymentRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getPaymentMethod() { + io.grpc.MethodDescriptor getPaymentMethod; + if ((getPaymentMethod = CustomerServiceGrpc.getPaymentMethod) == null) { + synchronized 
(CustomerServiceGrpc.class) { + if ((getPaymentMethod = CustomerServiceGrpc.getPaymentMethod) == null) { + CustomerServiceGrpc.getPaymentMethod = getPaymentMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Payment")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.PaymentRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new CustomerServiceMethodDescriptorSupplier("Payment")) + .build(); + } + } + } + return getPaymentMethod; + } + + private static volatile io.grpc.MethodDescriptor getPrepareMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "Prepare", + requestType = sample.rpc.PrepareRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getPrepareMethod() { + io.grpc.MethodDescriptor getPrepareMethod; + if ((getPrepareMethod = CustomerServiceGrpc.getPrepareMethod) == null) { + synchronized (CustomerServiceGrpc.class) { + if ((getPrepareMethod = CustomerServiceGrpc.getPrepareMethod) == null) { + CustomerServiceGrpc.getPrepareMethod = getPrepareMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Prepare")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.PrepareRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new CustomerServiceMethodDescriptorSupplier("Prepare")) + .build(); + } + } + } + return getPrepareMethod; + } + + private static volatile io.grpc.MethodDescriptor getValidateMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "Validate", + requestType = sample.rpc.ValidateRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getValidateMethod() { + io.grpc.MethodDescriptor getValidateMethod; + if ((getValidateMethod = CustomerServiceGrpc.getValidateMethod) == null) { + synchronized (CustomerServiceGrpc.class) { + if ((getValidateMethod = CustomerServiceGrpc.getValidateMethod) == null) { + CustomerServiceGrpc.getValidateMethod = getValidateMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Validate")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.ValidateRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new CustomerServiceMethodDescriptorSupplier("Validate")) + .build(); + } + } + } + return getValidateMethod; + } + + private static volatile io.grpc.MethodDescriptor getCommitMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "Commit", + requestType = sample.rpc.CommitRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + 
public static io.grpc.MethodDescriptor getCommitMethod() { + io.grpc.MethodDescriptor getCommitMethod; + if ((getCommitMethod = CustomerServiceGrpc.getCommitMethod) == null) { + synchronized (CustomerServiceGrpc.class) { + if ((getCommitMethod = CustomerServiceGrpc.getCommitMethod) == null) { + CustomerServiceGrpc.getCommitMethod = getCommitMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Commit")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.CommitRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new CustomerServiceMethodDescriptorSupplier("Commit")) + .build(); + } + } + } + return getCommitMethod; + } + + private static volatile io.grpc.MethodDescriptor getRollbackMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "Rollback", + requestType = sample.rpc.RollbackRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getRollbackMethod() { + io.grpc.MethodDescriptor getRollbackMethod; + if ((getRollbackMethod = CustomerServiceGrpc.getRollbackMethod) == null) { + synchronized (CustomerServiceGrpc.class) { + if ((getRollbackMethod = CustomerServiceGrpc.getRollbackMethod) == null) { + CustomerServiceGrpc.getRollbackMethod = getRollbackMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Rollback")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.RollbackRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor(new CustomerServiceMethodDescriptorSupplier("Rollback")) + .build(); + } + } + } + return getRollbackMethod; + } + + /** + * Creates a new async stub that supports all call types for the service + */ + public static CustomerServiceStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public CustomerServiceStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new CustomerServiceStub(channel, callOptions); + } + }; + return CustomerServiceStub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static CustomerServiceBlockingStub newBlockingStub( + io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public CustomerServiceBlockingStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new CustomerServiceBlockingStub(channel, callOptions); + } + }; + return CustomerServiceBlockingStub.newStub(factory, channel); + } + + /** + * Creates a new ListenableFuture-style stub that supports unary calls on the service + */ + public static CustomerServiceFutureStub newFutureStub( + io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public 
CustomerServiceFutureStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new CustomerServiceFutureStub(channel, callOptions); + } + }; + return CustomerServiceFutureStub.newStub(factory, channel); + } + + /** + *
+   * for Customer Service
+   * 
+ */ + public static abstract class CustomerServiceImplBase implements io.grpc.BindableService { + + /** + *
+     * Get customer information
+     * 
+ */ + public void getCustomerInfo(sample.rpc.GetCustomerInfoRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getGetCustomerInfoMethod(), responseObserver); + } + + /** + *
+     * Credit card repayment
+     * 
+ */ + public void repayment(sample.rpc.RepaymentRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getRepaymentMethod(), responseObserver); + } + + /** + *
+     * Credit card payment
+     * 
+ */ + public void payment(sample.rpc.PaymentRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getPaymentMethod(), responseObserver); + } + + /** + *
+     * Prepare the transaction
+     * 
+ */ + public void prepare(sample.rpc.PrepareRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getPrepareMethod(), responseObserver); + } + + /** + *
+     * Validate the transaction
+     * 
+ */ + public void validate(sample.rpc.ValidateRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getValidateMethod(), responseObserver); + } + + /** + *
+     * Commit the transaction
+     * 
+ */ + public void commit(sample.rpc.CommitRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getCommitMethod(), responseObserver); + } + + /** + *
+     * Rollback the transaction
+     * 
+ */ + public void rollback(sample.rpc.RollbackRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getRollbackMethod(), responseObserver); + } + + @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getGetCustomerInfoMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.GetCustomerInfoRequest, + sample.rpc.GetCustomerInfoResponse>( + this, METHODID_GET_CUSTOMER_INFO))) + .addMethod( + getRepaymentMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.RepaymentRequest, + com.google.protobuf.Empty>( + this, METHODID_REPAYMENT))) + .addMethod( + getPaymentMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.PaymentRequest, + com.google.protobuf.Empty>( + this, METHODID_PAYMENT))) + .addMethod( + getPrepareMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.PrepareRequest, + com.google.protobuf.Empty>( + this, METHODID_PREPARE))) + .addMethod( + getValidateMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.ValidateRequest, + com.google.protobuf.Empty>( + this, METHODID_VALIDATE))) + .addMethod( + getCommitMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.CommitRequest, + com.google.protobuf.Empty>( + this, METHODID_COMMIT))) + .addMethod( + getRollbackMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.RollbackRequest, + com.google.protobuf.Empty>( + this, METHODID_ROLLBACK))) + .build(); + } + } + + /** + *
+   * for Customer Service
+   * 
+ */ + public static final class CustomerServiceStub extends io.grpc.stub.AbstractAsyncStub { + private CustomerServiceStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected CustomerServiceStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new CustomerServiceStub(channel, callOptions); + } + + /** + *
+     * Get customer information
+     * 
+ */ + public void getCustomerInfo(sample.rpc.GetCustomerInfoRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetCustomerInfoMethod(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Credit card repayment
+     * 
+ */ + public void repayment(sample.rpc.RepaymentRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getRepaymentMethod(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Credit card payment
+     * 
+ */ + public void payment(sample.rpc.PaymentRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getPaymentMethod(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Prepare the transaction
+     * 
+ */ + public void prepare(sample.rpc.PrepareRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getPrepareMethod(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Validate the transaction
+     * 
+ */ + public void validate(sample.rpc.ValidateRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getValidateMethod(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Commit the transaction
+     * 
+ */ + public void commit(sample.rpc.CommitRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCommitMethod(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Rollback the transaction
+     * 
+ */ + public void rollback(sample.rpc.RollbackRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getRollbackMethod(), getCallOptions()), request, responseObserver); + } + } + + /** + *
+   * for Customer Service
+   * 
+ */ + public static final class CustomerServiceBlockingStub extends io.grpc.stub.AbstractBlockingStub { + private CustomerServiceBlockingStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected CustomerServiceBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new CustomerServiceBlockingStub(channel, callOptions); + } + + /** + *
+     * Get customer information
+     * 
+ */ + public sample.rpc.GetCustomerInfoResponse getCustomerInfo(sample.rpc.GetCustomerInfoRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetCustomerInfoMethod(), getCallOptions(), request); + } + + /** + *
+     * Credit card repayment
+     * 
+ */ + public com.google.protobuf.Empty repayment(sample.rpc.RepaymentRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getRepaymentMethod(), getCallOptions(), request); + } + + /** + *
+     * Credit card payment
+     * 
+ */ + public com.google.protobuf.Empty payment(sample.rpc.PaymentRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getPaymentMethod(), getCallOptions(), request); + } + + /** + *
+     * Prepare the transaction
+     * 
+ */ + public com.google.protobuf.Empty prepare(sample.rpc.PrepareRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getPrepareMethod(), getCallOptions(), request); + } + + /** + *
+     * Validate the transaction
+     * 
+ */ + public com.google.protobuf.Empty validate(sample.rpc.ValidateRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getValidateMethod(), getCallOptions(), request); + } + + /** + *
+     * Commit the transaction
+     * 
+ */ + public com.google.protobuf.Empty commit(sample.rpc.CommitRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCommitMethod(), getCallOptions(), request); + } + + /** + *
+     * Rollback the transaction
+     * 
+ */ + public com.google.protobuf.Empty rollback(sample.rpc.RollbackRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getRollbackMethod(), getCallOptions(), request); + } + } + + /** + *
+   * for Customer Service
+   * 
+ */ + public static final class CustomerServiceFutureStub extends io.grpc.stub.AbstractFutureStub { + private CustomerServiceFutureStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected CustomerServiceFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new CustomerServiceFutureStub(channel, callOptions); + } + + /** + *
+     * Get customer information
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture getCustomerInfo( + sample.rpc.GetCustomerInfoRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetCustomerInfoMethod(), getCallOptions()), request); + } + + /** + *
+     * Credit card repayment
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture repayment( + sample.rpc.RepaymentRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getRepaymentMethod(), getCallOptions()), request); + } + + /** + *
+     * Credit card payment
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture payment( + sample.rpc.PaymentRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getPaymentMethod(), getCallOptions()), request); + } + + /** + *
+     * Prepare the transaction
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture prepare( + sample.rpc.PrepareRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getPrepareMethod(), getCallOptions()), request); + } + + /** + *
+     * Validate the transaction
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture validate( + sample.rpc.ValidateRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getValidateMethod(), getCallOptions()), request); + } + + /** + *
+     * Commit the transaction
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture commit( + sample.rpc.CommitRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCommitMethod(), getCallOptions()), request); + } + + /** + *
+     * Rollback the transaction
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture rollback( + sample.rpc.RollbackRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getRollbackMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_GET_CUSTOMER_INFO = 0; + private static final int METHODID_REPAYMENT = 1; + private static final int METHODID_PAYMENT = 2; + private static final int METHODID_PREPARE = 3; + private static final int METHODID_VALIDATE = 4; + private static final int METHODID_COMMIT = 5; + private static final int METHODID_ROLLBACK = 6; + + private static final class MethodHandlers implements + io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final CustomerServiceImplBase serviceImpl; + private final int methodId; + + MethodHandlers(CustomerServiceImplBase serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_GET_CUSTOMER_INFO: + serviceImpl.getCustomerInfo((sample.rpc.GetCustomerInfoRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_REPAYMENT: + serviceImpl.repayment((sample.rpc.RepaymentRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_PAYMENT: + serviceImpl.payment((sample.rpc.PaymentRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_PREPARE: + serviceImpl.prepare((sample.rpc.PrepareRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_VALIDATE: + serviceImpl.validate((sample.rpc.ValidateRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_COMMIT: + serviceImpl.commit((sample.rpc.CommitRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_ROLLBACK: + serviceImpl.rollback((sample.rpc.RollbackRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + } + + private static abstract class CustomerServiceBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, io.grpc.protobuf.ProtoServiceDescriptorSupplier { + CustomerServiceBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return sample.rpc.Sample.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("CustomerService"); + } + } + + private static final class CustomerServiceFileDescriptorSupplier + extends CustomerServiceBaseDescriptorSupplier { + CustomerServiceFileDescriptorSupplier() {} + } + + private static final class CustomerServiceMethodDescriptorSupplier + extends CustomerServiceBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final String methodName; + + 
CustomerServiceMethodDescriptorSupplier(String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (CustomerServiceGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new CustomerServiceFileDescriptorSupplier()) + .addMethod(getGetCustomerInfoMethod()) + .addMethod(getRepaymentMethod()) + .addMethod(getPaymentMethod()) + .addMethod(getPrepareMethod()) + .addMethod(getValidateMethod()) + .addMethod(getCommitMethod()) + .addMethod(getRollbackMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/grpc/sample/rpc/OrderServiceGrpc.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/grpc/sample/rpc/OrderServiceGrpc.java new file mode 100644 index 00000000..30f68eda --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/grpc/sample/rpc/OrderServiceGrpc.java @@ -0,0 +1,475 @@ +package sample.rpc; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + *
+ * for Order Service
+ * 
+ */ +@javax.annotation.Generated( + value = "by gRPC proto compiler (version 1.53.0)", + comments = "Source: sample.proto") +@io.grpc.stub.annotations.GrpcGenerated +public final class OrderServiceGrpc { + + private OrderServiceGrpc() {} + + public static final String SERVICE_NAME = "rpc.OrderService"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor getPlaceOrderMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "PlaceOrder", + requestType = sample.rpc.PlaceOrderRequest.class, + responseType = sample.rpc.PlaceOrderResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getPlaceOrderMethod() { + io.grpc.MethodDescriptor getPlaceOrderMethod; + if ((getPlaceOrderMethod = OrderServiceGrpc.getPlaceOrderMethod) == null) { + synchronized (OrderServiceGrpc.class) { + if ((getPlaceOrderMethod = OrderServiceGrpc.getPlaceOrderMethod) == null) { + OrderServiceGrpc.getPlaceOrderMethod = getPlaceOrderMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "PlaceOrder")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.PlaceOrderRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.PlaceOrderResponse.getDefaultInstance())) + .setSchemaDescriptor(new OrderServiceMethodDescriptorSupplier("PlaceOrder")) + .build(); + } + } + } + return getPlaceOrderMethod; + } + + private static volatile io.grpc.MethodDescriptor getGetOrderMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetOrder", + requestType = sample.rpc.GetOrderRequest.class, + responseType = sample.rpc.GetOrderResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getGetOrderMethod() { + io.grpc.MethodDescriptor getGetOrderMethod; + if ((getGetOrderMethod = OrderServiceGrpc.getGetOrderMethod) == null) { + synchronized (OrderServiceGrpc.class) { + if ((getGetOrderMethod = OrderServiceGrpc.getGetOrderMethod) == null) { + OrderServiceGrpc.getGetOrderMethod = getGetOrderMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetOrder")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.GetOrderRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.GetOrderResponse.getDefaultInstance())) + .setSchemaDescriptor(new OrderServiceMethodDescriptorSupplier("GetOrder")) + .build(); + } + } + } + return getGetOrderMethod; + } + + private static volatile io.grpc.MethodDescriptor getGetOrdersMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetOrders", + requestType = sample.rpc.GetOrdersRequest.class, + responseType = sample.rpc.GetOrdersResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getGetOrdersMethod() { + io.grpc.MethodDescriptor getGetOrdersMethod; + if ((getGetOrdersMethod = OrderServiceGrpc.getGetOrdersMethod) == null) { + synchronized (OrderServiceGrpc.class) { + if ((getGetOrdersMethod = OrderServiceGrpc.getGetOrdersMethod) == null) { + 
OrderServiceGrpc.getGetOrdersMethod = getGetOrdersMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetOrders")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.GetOrdersRequest.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + sample.rpc.GetOrdersResponse.getDefaultInstance())) + .setSchemaDescriptor(new OrderServiceMethodDescriptorSupplier("GetOrders")) + .build(); + } + } + } + return getGetOrdersMethod; + } + + /** + * Creates a new async stub that supports all call types for the service + */ + public static OrderServiceStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public OrderServiceStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new OrderServiceStub(channel, callOptions); + } + }; + return OrderServiceStub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static OrderServiceBlockingStub newBlockingStub( + io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public OrderServiceBlockingStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new OrderServiceBlockingStub(channel, callOptions); + } + }; + return OrderServiceBlockingStub.newStub(factory, channel); + } + + /** + * Creates a new ListenableFuture-style stub that supports unary calls on the service + */ + public static OrderServiceFutureStub newFutureStub( + io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public OrderServiceFutureStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new OrderServiceFutureStub(channel, callOptions); + } + }; + return OrderServiceFutureStub.newStub(factory, channel); + } + + /** + *
+   * for Order Service
+   * 
+ */ + public static abstract class OrderServiceImplBase implements io.grpc.BindableService { + + /** + *
+     * Place an order. It's a transaction that spans OrderService and CustomerService
+     * 
+ */ + public void placeOrder(sample.rpc.PlaceOrderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getPlaceOrderMethod(), responseObserver); + } + + /** + *
+     * Get Order information by order ID
+     * 
+ */ + public void getOrder(sample.rpc.GetOrderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getGetOrderMethod(), responseObserver); + } + + /** + *
+     * Get Order information by customer ID
+     * 
+ */ + public void getOrders(sample.rpc.GetOrdersRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getGetOrdersMethod(), responseObserver); + } + + @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getPlaceOrderMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.PlaceOrderRequest, + sample.rpc.PlaceOrderResponse>( + this, METHODID_PLACE_ORDER))) + .addMethod( + getGetOrderMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.GetOrderRequest, + sample.rpc.GetOrderResponse>( + this, METHODID_GET_ORDER))) + .addMethod( + getGetOrdersMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + sample.rpc.GetOrdersRequest, + sample.rpc.GetOrdersResponse>( + this, METHODID_GET_ORDERS))) + .build(); + } + } + + /** + *
+   * for Order Service
+   * 
+ */ + public static final class OrderServiceStub extends io.grpc.stub.AbstractAsyncStub { + private OrderServiceStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected OrderServiceStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new OrderServiceStub(channel, callOptions); + } + + /** + *
+     * Place an order. It's a transaction that spans OrderService and CustomerService
+     * 
+ */ + public void placeOrder(sample.rpc.PlaceOrderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getPlaceOrderMethod(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Get Order information by order ID
+     * 
+ */ + public void getOrder(sample.rpc.GetOrderRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetOrderMethod(), getCallOptions()), request, responseObserver); + } + + /** + *
+     * Get Order information by customer ID
+     * 
+ */ + public void getOrders(sample.rpc.GetOrdersRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetOrdersMethod(), getCallOptions()), request, responseObserver); + } + } + + /** + *
+   * for Order Service
+   * 
+ */ + public static final class OrderServiceBlockingStub extends io.grpc.stub.AbstractBlockingStub { + private OrderServiceBlockingStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected OrderServiceBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new OrderServiceBlockingStub(channel, callOptions); + } + + /** + *
+     * Place an order. It's a transaction that spans OrderService and CustomerService
+     * 
+ */ + public sample.rpc.PlaceOrderResponse placeOrder(sample.rpc.PlaceOrderRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getPlaceOrderMethod(), getCallOptions(), request); + } + + /** + *
+     * Get Order information by order ID
+     * 
+ */ + public sample.rpc.GetOrderResponse getOrder(sample.rpc.GetOrderRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetOrderMethod(), getCallOptions(), request); + } + + /** + *
+     * Get Order information by customer ID
+     * 
+ */ + public sample.rpc.GetOrdersResponse getOrders(sample.rpc.GetOrdersRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetOrdersMethod(), getCallOptions(), request); + } + } + + /** + *
+   * for Order Service
+   * 
+ */ + public static final class OrderServiceFutureStub extends io.grpc.stub.AbstractFutureStub { + private OrderServiceFutureStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected OrderServiceFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new OrderServiceFutureStub(channel, callOptions); + } + + /** + *
+     * Place an order. It's a transaction that spans OrderService and CustomerService
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture placeOrder( + sample.rpc.PlaceOrderRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getPlaceOrderMethod(), getCallOptions()), request); + } + + /** + *
+     * Get Order information by order ID
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture getOrder( + sample.rpc.GetOrderRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetOrderMethod(), getCallOptions()), request); + } + + /** + *
+     * Get Order information by customer ID
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture getOrders( + sample.rpc.GetOrdersRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetOrdersMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_PLACE_ORDER = 0; + private static final int METHODID_GET_ORDER = 1; + private static final int METHODID_GET_ORDERS = 2; + + private static final class MethodHandlers implements + io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final OrderServiceImplBase serviceImpl; + private final int methodId; + + MethodHandlers(OrderServiceImplBase serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_PLACE_ORDER: + serviceImpl.placeOrder((sample.rpc.PlaceOrderRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_GET_ORDER: + serviceImpl.getOrder((sample.rpc.GetOrderRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_GET_ORDERS: + serviceImpl.getOrders((sample.rpc.GetOrdersRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + } + + private static abstract class OrderServiceBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, io.grpc.protobuf.ProtoServiceDescriptorSupplier { + OrderServiceBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return sample.rpc.Sample.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("OrderService"); + } + } + + private static final class OrderServiceFileDescriptorSupplier + extends OrderServiceBaseDescriptorSupplier { + OrderServiceFileDescriptorSupplier() {} + } + + private static final class OrderServiceMethodDescriptorSupplier + extends OrderServiceBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final String methodName; + + OrderServiceMethodDescriptorSupplier(String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (OrderServiceGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new OrderServiceFileDescriptorSupplier()) + .addMethod(getPlaceOrderMethod()) + .addMethod(getGetOrderMethod()) + .addMethod(getGetOrdersMethod()) + .build(); 
+ } + } + } + return result; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/CommitRequest.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/CommitRequest.java new file mode 100644 index 00000000..3fc815f0 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/CommitRequest.java @@ -0,0 +1,509 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.CommitRequest} + */ +public final class CommitRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.CommitRequest) + CommitRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use CommitRequest.newBuilder() to construct. + private CommitRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private CommitRequest() { + transactionId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new CommitRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_CommitRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_CommitRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.CommitRequest.class, sample.rpc.CommitRequest.Builder.class); + } + + public static final int TRANSACTION_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + @java.lang.Override + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + @java.lang.Override + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, transactionId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, transactionId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.CommitRequest)) { + return super.equals(obj); + } + sample.rpc.CommitRequest other = (sample.rpc.CommitRequest) obj; + + if (!getTransactionId() + .equals(other.getTransactionId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getTransactionId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.CommitRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.CommitRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.CommitRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.CommitRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.CommitRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.CommitRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
sample.rpc.CommitRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.CommitRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.CommitRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.CommitRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.CommitRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.CommitRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.CommitRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.CommitRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.CommitRequest) + sample.rpc.CommitRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_CommitRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_CommitRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.CommitRequest.class, sample.rpc.CommitRequest.Builder.class); + } + + // Construct using sample.rpc.CommitRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + transactionId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_CommitRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.CommitRequest getDefaultInstanceForType() { + return sample.rpc.CommitRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.CommitRequest build() { + sample.rpc.CommitRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.CommitRequest buildPartial() { + sample.rpc.CommitRequest result = new sample.rpc.CommitRequest(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.CommitRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.transactionId_ = transactionId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.CommitRequest) { + return mergeFrom((sample.rpc.CommitRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.CommitRequest other) { + if (other == sample.rpc.CommitRequest.getDefaultInstance()) return this; + if (!other.getTransactionId().isEmpty()) { + transactionId_ = other.transactionId_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + transactionId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: { + if 
(!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. + */ + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string transaction_id = 1; + * @param value The transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @return This builder for chaining. + */ + public Builder clearTransactionId() { + transactionId_ = getDefaultInstance().getTransactionId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @param value The bytes for transactionId to set. + * @return This builder for chaining. 
+ */ + public Builder setTransactionIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.CommitRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.CommitRequest) + private static final sample.rpc.CommitRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.CommitRequest(); + } + + public static sample.rpc.CommitRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CommitRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.CommitRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/CommitRequestOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/CommitRequestOrBuilder.java new file mode 100644 index 00000000..cef5ed22 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/CommitRequestOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface CommitRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.CommitRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * string transaction_id = 1; + * @return The transactionId. + */ + java.lang.String getTransactionId(); + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + com.google.protobuf.ByteString + getTransactionIdBytes(); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoRequest.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoRequest.java new file mode 100644 index 00000000..bdfb3bed --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoRequest.java @@ -0,0 +1,599 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.GetCustomerInfoRequest} + */ +public final class GetCustomerInfoRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.GetCustomerInfoRequest) + GetCustomerInfoRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use GetCustomerInfoRequest.newBuilder() to construct. + private GetCustomerInfoRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GetCustomerInfoRequest() { + transactionId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new GetCustomerInfoRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetCustomerInfoRequest.class, sample.rpc.GetCustomerInfoRequest.Builder.class); + } + + private int bitField0_; + public static final int TRANSACTION_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object transactionId_ = ""; + /** + * optional string transaction_id = 1; + * @return Whether the transactionId field is set. + */ + @java.lang.Override + public boolean hasTransactionId() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * optional string transaction_id = 1; + * @return The transactionId. + */ + @java.lang.Override + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } + } + /** + * optional string transaction_id = 1; + * @return The bytes for transactionId. + */ + @java.lang.Override + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CUSTOMER_ID_FIELD_NUMBER = 2; + private int customerId_ = 0; + /** + * int32 customer_id = 2; + * @return The customerId. 
+ */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, transactionId_); + } + if (customerId_ != 0) { + output.writeInt32(2, customerId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, transactionId_); + } + if (customerId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, customerId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.GetCustomerInfoRequest)) { + return super.equals(obj); + } + sample.rpc.GetCustomerInfoRequest other = (sample.rpc.GetCustomerInfoRequest) obj; + + if (hasTransactionId() != other.hasTransactionId()) return false; + if (hasTransactionId()) { + if (!getTransactionId() + .equals(other.getTransactionId())) return false; + } + if (getCustomerId() + != other.getCustomerId()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasTransactionId()) { + hash = (37 * hash) + TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getTransactionId().hashCode(); + } + hash = (37 * hash) + CUSTOMER_ID_FIELD_NUMBER; + hash = (53 * hash) + getCustomerId(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.GetCustomerInfoRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom( + byte[] data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.GetCustomerInfoRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.GetCustomerInfoRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetCustomerInfoRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.GetCustomerInfoRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.GetCustomerInfoRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.GetCustomerInfoRequest) + sample.rpc.GetCustomerInfoRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetCustomerInfoRequest.class, sample.rpc.GetCustomerInfoRequest.Builder.class); + } + + // Construct using sample.rpc.GetCustomerInfoRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + transactionId_ = ""; + customerId_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.GetCustomerInfoRequest getDefaultInstanceForType() { + return sample.rpc.GetCustomerInfoRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.GetCustomerInfoRequest build() { + sample.rpc.GetCustomerInfoRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.GetCustomerInfoRequest buildPartial() { + sample.rpc.GetCustomerInfoRequest result = new sample.rpc.GetCustomerInfoRequest(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.GetCustomerInfoRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.transactionId_ = transactionId_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.customerId_ = customerId_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.GetCustomerInfoRequest) { + return mergeFrom((sample.rpc.GetCustomerInfoRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.GetCustomerInfoRequest other) { + if (other == sample.rpc.GetCustomerInfoRequest.getDefaultInstance()) return this; + if (other.hasTransactionId()) { + transactionId_ = other.transactionId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getCustomerId() != 0) { + setCustomerId(other.getCustomerId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, 
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + transactionId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: { + customerId_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object transactionId_ = ""; + /** + * optional string transaction_id = 1; + * @return Whether the transactionId field is set. + */ + public boolean hasTransactionId() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * optional string transaction_id = 1; + * @return The transactionId. + */ + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string transaction_id = 1; + * @return The bytes for transactionId. + */ + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string transaction_id = 1; + * @param value The transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * optional string transaction_id = 1; + * @return This builder for chaining. + */ + public Builder clearTransactionId() { + transactionId_ = getDefaultInstance().getTransactionId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * optional string transaction_id = 1; + * @param value The bytes for transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int customerId_ ; + /** + * int32 customer_id = 2; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + /** + * int32 customer_id = 2; + * @param value The customerId to set. + * @return This builder for chaining. + */ + public Builder setCustomerId(int value) { + + customerId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * int32 customer_id = 2; + * @return This builder for chaining. 
+ */ + public Builder clearCustomerId() { + bitField0_ = (bitField0_ & ~0x00000002); + customerId_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.GetCustomerInfoRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.GetCustomerInfoRequest) + private static final sample.rpc.GetCustomerInfoRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.GetCustomerInfoRequest(); + } + + public static sample.rpc.GetCustomerInfoRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetCustomerInfoRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.GetCustomerInfoRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoRequestOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoRequestOrBuilder.java new file mode 100644 index 00000000..fa5a8165 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoRequestOrBuilder.java @@ -0,0 +1,32 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface GetCustomerInfoRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.GetCustomerInfoRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * optional string transaction_id = 1; + * @return Whether the transactionId field is set. + */ + boolean hasTransactionId(); + /** + * optional string transaction_id = 1; + * @return The transactionId. + */ + java.lang.String getTransactionId(); + /** + * optional string transaction_id = 1; + * @return The bytes for transactionId. + */ + com.google.protobuf.ByteString + getTransactionIdBytes(); + + /** + * int32 customer_id = 2; + * @return The customerId. 
+ */ + int getCustomerId(); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoResponse.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoResponse.java new file mode 100644 index 00000000..c89a0a68 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoResponse.java @@ -0,0 +1,707 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.GetCustomerInfoResponse} + */ +public final class GetCustomerInfoResponse extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.GetCustomerInfoResponse) + GetCustomerInfoResponseOrBuilder { +private static final long serialVersionUID = 0L; + // Use GetCustomerInfoResponse.newBuilder() to construct. + private GetCustomerInfoResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GetCustomerInfoResponse() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new GetCustomerInfoResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetCustomerInfoResponse.class, sample.rpc.GetCustomerInfoResponse.Builder.class); + } + + public static final int ID_FIELD_NUMBER = 1; + private int id_ = 0; + /** + * int32 id = 1; + * @return The id. + */ + @java.lang.Override + public int getId() { + return id_; + } + + public static final int NAME_FIELD_NUMBER = 2; + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + /** + * string name = 2; + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * string name = 2; + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CREDIT_LIMIT_FIELD_NUMBER = 3; + private int creditLimit_ = 0; + /** + * int32 credit_limit = 3; + * @return The creditLimit. + */ + @java.lang.Override + public int getCreditLimit() { + return creditLimit_; + } + + public static final int CREDIT_TOTAL_FIELD_NUMBER = 4; + private int creditTotal_ = 0; + /** + * int32 credit_total = 4; + * @return The creditTotal. 
+ */ + @java.lang.Override + public int getCreditTotal() { + return creditTotal_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (id_ != 0) { + output.writeInt32(1, id_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, name_); + } + if (creditLimit_ != 0) { + output.writeInt32(3, creditLimit_); + } + if (creditTotal_ != 0) { + output.writeInt32(4, creditTotal_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (id_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, id_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, name_); + } + if (creditLimit_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(3, creditLimit_); + } + if (creditTotal_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(4, creditTotal_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.GetCustomerInfoResponse)) { + return super.equals(obj); + } + sample.rpc.GetCustomerInfoResponse other = (sample.rpc.GetCustomerInfoResponse) obj; + + if (getId() + != other.getId()) return false; + if (!getName() + .equals(other.getName())) return false; + if (getCreditLimit() + != other.getCreditLimit()) return false; + if (getCreditTotal() + != other.getCreditTotal()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ID_FIELD_NUMBER; + hash = (53 * hash) + getId(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + CREDIT_LIMIT_FIELD_NUMBER; + hash = (53 * hash) + getCreditLimit(); + hash = (37 * hash) + CREDIT_TOTAL_FIELD_NUMBER; + hash = (53 * hash) + getCreditTotal(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.GetCustomerInfoResponse parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom( 
+ com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.GetCustomerInfoResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.GetCustomerInfoResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetCustomerInfoResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.GetCustomerInfoResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.GetCustomerInfoResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.GetCustomerInfoResponse) + sample.rpc.GetCustomerInfoResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetCustomerInfoResponse.class, sample.rpc.GetCustomerInfoResponse.Builder.class); + } + + // Construct using sample.rpc.GetCustomerInfoResponse.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + id_ = 0; + name_ = ""; + creditLimit_ = 0; + creditTotal_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_GetCustomerInfoResponse_descriptor; + } + + @java.lang.Override + public sample.rpc.GetCustomerInfoResponse getDefaultInstanceForType() { + return sample.rpc.GetCustomerInfoResponse.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.GetCustomerInfoResponse build() { + sample.rpc.GetCustomerInfoResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.GetCustomerInfoResponse buildPartial() { + sample.rpc.GetCustomerInfoResponse result = new sample.rpc.GetCustomerInfoResponse(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.GetCustomerInfoResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.id_ = id_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.creditLimit_ = creditLimit_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.creditTotal_ = creditTotal_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.GetCustomerInfoResponse) { + return mergeFrom((sample.rpc.GetCustomerInfoResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.GetCustomerInfoResponse other) { + if (other == sample.rpc.GetCustomerInfoResponse.getDefaultInstance()) return this; + if (other.getId() != 0) { + setId(other.getId()); + } + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getCreditLimit() != 0) { + setCreditLimit(other.getCreditLimit()); + } + if (other.getCreditTotal() != 0) { + setCreditTotal(other.getCreditTotal()); + } + 
this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + id_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: { + creditLimit_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: { + creditTotal_ = input.readInt32(); + bitField0_ |= 0x00000008; + break; + } // case 32 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private int id_ ; + /** + * int32 id = 1; + * @return The id. + */ + @java.lang.Override + public int getId() { + return id_; + } + /** + * int32 id = 1; + * @param value The id to set. + * @return This builder for chaining. + */ + public Builder setId(int value) { + + id_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * int32 id = 1; + * @return This builder for chaining. + */ + public Builder clearId() { + bitField0_ = (bitField0_ & ~0x00000001); + id_ = 0; + onChanged(); + return this; + } + + private java.lang.Object name_ = ""; + /** + * string name = 2; + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string name = 2; + * @return The bytes for name. + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string name = 2; + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + name_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * string name = 2; + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * string name = 2; + * @param value The bytes for name to set. + * @return This builder for chaining. 
+ */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private int creditLimit_ ; + /** + * int32 credit_limit = 3; + * @return The creditLimit. + */ + @java.lang.Override + public int getCreditLimit() { + return creditLimit_; + } + /** + * int32 credit_limit = 3; + * @param value The creditLimit to set. + * @return This builder for chaining. + */ + public Builder setCreditLimit(int value) { + + creditLimit_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * int32 credit_limit = 3; + * @return This builder for chaining. + */ + public Builder clearCreditLimit() { + bitField0_ = (bitField0_ & ~0x00000004); + creditLimit_ = 0; + onChanged(); + return this; + } + + private int creditTotal_ ; + /** + * int32 credit_total = 4; + * @return The creditTotal. + */ + @java.lang.Override + public int getCreditTotal() { + return creditTotal_; + } + /** + * int32 credit_total = 4; + * @param value The creditTotal to set. + * @return This builder for chaining. + */ + public Builder setCreditTotal(int value) { + + creditTotal_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * int32 credit_total = 4; + * @return This builder for chaining. + */ + public Builder clearCreditTotal() { + bitField0_ = (bitField0_ & ~0x00000008); + creditTotal_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.GetCustomerInfoResponse) + } + + // @@protoc_insertion_point(class_scope:rpc.GetCustomerInfoResponse) + private static final sample.rpc.GetCustomerInfoResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.GetCustomerInfoResponse(); + } + + public static sample.rpc.GetCustomerInfoResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetCustomerInfoResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.GetCustomerInfoResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git 
a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoResponseOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoResponseOrBuilder.java new file mode 100644 index 00000000..48ddb5a0 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetCustomerInfoResponseOrBuilder.java @@ -0,0 +1,39 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface GetCustomerInfoResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.GetCustomerInfoResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * int32 id = 1; + * @return The id. + */ + int getId(); + + /** + * string name = 2; + * @return The name. + */ + java.lang.String getName(); + /** + * string name = 2; + * @return The bytes for name. + */ + com.google.protobuf.ByteString + getNameBytes(); + + /** + * int32 credit_limit = 3; + * @return The creditLimit. + */ + int getCreditLimit(); + + /** + * int32 credit_total = 4; + * @return The creditTotal. + */ + int getCreditTotal(); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderRequest.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderRequest.java new file mode 100644 index 00000000..f9bae4f4 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderRequest.java @@ -0,0 +1,509 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.GetOrderRequest} + */ +public final class GetOrderRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.GetOrderRequest) + GetOrderRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use GetOrderRequest.newBuilder() to construct. + private GetOrderRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GetOrderRequest() { + orderId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new GetOrderRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetOrderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetOrderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetOrderRequest.class, sample.rpc.GetOrderRequest.Builder.class); + } + + public static final int ORDER_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object orderId_ = ""; + /** + * string order_id = 1; + * @return The orderId. 
+ */ + @java.lang.Override + public java.lang.String getOrderId() { + java.lang.Object ref = orderId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + orderId_ = s; + return s; + } + } + /** + * string order_id = 1; + * @return The bytes for orderId. + */ + @java.lang.Override + public com.google.protobuf.ByteString + getOrderIdBytes() { + java.lang.Object ref = orderId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + orderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(orderId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, orderId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(orderId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, orderId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.GetOrderRequest)) { + return super.equals(obj); + } + sample.rpc.GetOrderRequest other = (sample.rpc.GetOrderRequest) obj; + + if (!getOrderId() + .equals(other.getOrderId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ORDER_ID_FIELD_NUMBER; + hash = (53 * hash) + getOrderId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.GetOrderRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrderRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrderRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrderRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrderRequest parseFrom(byte[] data) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrderRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrderRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetOrderRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.GetOrderRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.GetOrderRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.GetOrderRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetOrderRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.GetOrderRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.GetOrderRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.GetOrderRequest) + sample.rpc.GetOrderRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetOrderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetOrderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetOrderRequest.class, sample.rpc.GetOrderRequest.Builder.class); + } + + // Construct using sample.rpc.GetOrderRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + orderId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_GetOrderRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.GetOrderRequest getDefaultInstanceForType() { + return sample.rpc.GetOrderRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.GetOrderRequest build() { + sample.rpc.GetOrderRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.GetOrderRequest buildPartial() { + sample.rpc.GetOrderRequest result = new sample.rpc.GetOrderRequest(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.GetOrderRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.orderId_ = orderId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.GetOrderRequest) { + return mergeFrom((sample.rpc.GetOrderRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.GetOrderRequest other) { + if (other == sample.rpc.GetOrderRequest.getDefaultInstance()) return this; + if (!other.getOrderId().isEmpty()) { + orderId_ = other.orderId_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + orderId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: { + if 
(!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object orderId_ = ""; + /** + * string order_id = 1; + * @return The orderId. + */ + public java.lang.String getOrderId() { + java.lang.Object ref = orderId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + orderId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string order_id = 1; + * @return The bytes for orderId. + */ + public com.google.protobuf.ByteString + getOrderIdBytes() { + java.lang.Object ref = orderId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + orderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string order_id = 1; + * @param value The orderId to set. + * @return This builder for chaining. + */ + public Builder setOrderId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + orderId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * string order_id = 1; + * @return This builder for chaining. + */ + public Builder clearOrderId() { + orderId_ = getDefaultInstance().getOrderId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * string order_id = 1; + * @param value The bytes for orderId to set. + * @return This builder for chaining. 
+ */ + public Builder setOrderIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + orderId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.GetOrderRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.GetOrderRequest) + private static final sample.rpc.GetOrderRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.GetOrderRequest(); + } + + public static sample.rpc.GetOrderRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetOrderRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.GetOrderRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderRequestOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderRequestOrBuilder.java new file mode 100644 index 00000000..dfe42efc --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderRequestOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface GetOrderRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.GetOrderRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * string order_id = 1; + * @return The orderId. + */ + java.lang.String getOrderId(); + /** + * string order_id = 1; + * @return The bytes for orderId. 
+ */ + com.google.protobuf.ByteString + getOrderIdBytes(); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderResponse.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderResponse.java new file mode 100644 index 00000000..c707fd38 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderResponse.java @@ -0,0 +1,554 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.GetOrderResponse} + */ +public final class GetOrderResponse extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.GetOrderResponse) + GetOrderResponseOrBuilder { +private static final long serialVersionUID = 0L; + // Use GetOrderResponse.newBuilder() to construct. + private GetOrderResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GetOrderResponse() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new GetOrderResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetOrderResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetOrderResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetOrderResponse.class, sample.rpc.GetOrderResponse.Builder.class); + } + + public static final int ORDER_FIELD_NUMBER = 1; + private sample.rpc.Order order_; + /** + * .rpc.Order order = 1; + * @return Whether the order field is set. + */ + @java.lang.Override + public boolean hasOrder() { + return order_ != null; + } + /** + * .rpc.Order order = 1; + * @return The order. + */ + @java.lang.Override + public sample.rpc.Order getOrder() { + return order_ == null ? sample.rpc.Order.getDefaultInstance() : order_; + } + /** + * .rpc.Order order = 1; + */ + @java.lang.Override + public sample.rpc.OrderOrBuilder getOrderOrBuilder() { + return order_ == null ? 
sample.rpc.Order.getDefaultInstance() : order_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (order_ != null) { + output.writeMessage(1, getOrder()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (order_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getOrder()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.GetOrderResponse)) { + return super.equals(obj); + } + sample.rpc.GetOrderResponse other = (sample.rpc.GetOrderResponse) obj; + + if (hasOrder() != other.hasOrder()) return false; + if (hasOrder()) { + if (!getOrder() + .equals(other.getOrder())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasOrder()) { + hash = (37 * hash) + ORDER_FIELD_NUMBER; + hash = (53 * hash) + getOrder().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.GetOrderResponse parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrderResponse parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrderResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrderResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrderResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrderResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrderResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetOrderResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.GetOrderResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.GetOrderResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.GetOrderResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetOrderResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.GetOrderResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.GetOrderResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.GetOrderResponse) + sample.rpc.GetOrderResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetOrderResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetOrderResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetOrderResponse.class, sample.rpc.GetOrderResponse.Builder.class); + } + + // Construct using sample.rpc.GetOrderResponse.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + order_ = null; + if (orderBuilder_ != null) { + orderBuilder_.dispose(); + orderBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_GetOrderResponse_descriptor; + } + + @java.lang.Override + public sample.rpc.GetOrderResponse getDefaultInstanceForType() { + return sample.rpc.GetOrderResponse.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.GetOrderResponse build() { + sample.rpc.GetOrderResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + 
} + + @java.lang.Override + public sample.rpc.GetOrderResponse buildPartial() { + sample.rpc.GetOrderResponse result = new sample.rpc.GetOrderResponse(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.GetOrderResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.order_ = orderBuilder_ == null + ? order_ + : orderBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.GetOrderResponse) { + return mergeFrom((sample.rpc.GetOrderResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.GetOrderResponse other) { + if (other == sample.rpc.GetOrderResponse.getDefaultInstance()) return this; + if (other.hasOrder()) { + mergeOrder(other.getOrder()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + input.readMessage( + getOrderFieldBuilder().getBuilder(), + extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private sample.rpc.Order order_; + private com.google.protobuf.SingleFieldBuilderV3< + sample.rpc.Order, sample.rpc.Order.Builder, sample.rpc.OrderOrBuilder> orderBuilder_; + /** + * .rpc.Order order = 1; + * @return Whether the order field is set. + */ + public boolean hasOrder() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * .rpc.Order order = 1; + * @return The order. + */ + public sample.rpc.Order getOrder() { + if (orderBuilder_ == null) { + return order_ == null ? 
sample.rpc.Order.getDefaultInstance() : order_; + } else { + return orderBuilder_.getMessage(); + } + } + /** + * .rpc.Order order = 1; + */ + public Builder setOrder(sample.rpc.Order value) { + if (orderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + order_ = value; + } else { + orderBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * .rpc.Order order = 1; + */ + public Builder setOrder( + sample.rpc.Order.Builder builderForValue) { + if (orderBuilder_ == null) { + order_ = builderForValue.build(); + } else { + orderBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * .rpc.Order order = 1; + */ + public Builder mergeOrder(sample.rpc.Order value) { + if (orderBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) && + order_ != null && + order_ != sample.rpc.Order.getDefaultInstance()) { + getOrderBuilder().mergeFrom(value); + } else { + order_ = value; + } + } else { + orderBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * .rpc.Order order = 1; + */ + public Builder clearOrder() { + bitField0_ = (bitField0_ & ~0x00000001); + order_ = null; + if (orderBuilder_ != null) { + orderBuilder_.dispose(); + orderBuilder_ = null; + } + onChanged(); + return this; + } + /** + * .rpc.Order order = 1; + */ + public sample.rpc.Order.Builder getOrderBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getOrderFieldBuilder().getBuilder(); + } + /** + * .rpc.Order order = 1; + */ + public sample.rpc.OrderOrBuilder getOrderOrBuilder() { + if (orderBuilder_ != null) { + return orderBuilder_.getMessageOrBuilder(); + } else { + return order_ == null ? 
+ sample.rpc.Order.getDefaultInstance() : order_; + } + } + /** + * .rpc.Order order = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + sample.rpc.Order, sample.rpc.Order.Builder, sample.rpc.OrderOrBuilder> + getOrderFieldBuilder() { + if (orderBuilder_ == null) { + orderBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + sample.rpc.Order, sample.rpc.Order.Builder, sample.rpc.OrderOrBuilder>( + getOrder(), + getParentForChildren(), + isClean()); + order_ = null; + } + return orderBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.GetOrderResponse) + } + + // @@protoc_insertion_point(class_scope:rpc.GetOrderResponse) + private static final sample.rpc.GetOrderResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.GetOrderResponse(); + } + + public static sample.rpc.GetOrderResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetOrderResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.GetOrderResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderResponseOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderResponseOrBuilder.java new file mode 100644 index 00000000..a29de622 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrderResponseOrBuilder.java @@ -0,0 +1,24 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface GetOrderResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.GetOrderResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * .rpc.Order order = 1; + * @return Whether the order field is set. + */ + boolean hasOrder(); + /** + * .rpc.Order order = 1; + * @return The order. 
+ */ + sample.rpc.Order getOrder(); + /** + * .rpc.Order order = 1; + */ + sample.rpc.OrderOrBuilder getOrderOrBuilder(); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersRequest.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersRequest.java new file mode 100644 index 00000000..dee46ffb --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersRequest.java @@ -0,0 +1,439 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.GetOrdersRequest} + */ +public final class GetOrdersRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.GetOrdersRequest) + GetOrdersRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use GetOrdersRequest.newBuilder() to construct. + private GetOrdersRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GetOrdersRequest() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new GetOrdersRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetOrdersRequest.class, sample.rpc.GetOrdersRequest.Builder.class); + } + + public static final int CUSTOMER_ID_FIELD_NUMBER = 1; + private int customerId_ = 0; + /** + * int32 customer_id = 1; + * @return The customerId. 
+ */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (customerId_ != 0) { + output.writeInt32(1, customerId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (customerId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, customerId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.GetOrdersRequest)) { + return super.equals(obj); + } + sample.rpc.GetOrdersRequest other = (sample.rpc.GetOrdersRequest) obj; + + if (getCustomerId() + != other.getCustomerId()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CUSTOMER_ID_FIELD_NUMBER; + hash = (53 * hash) + getCustomerId(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.GetOrdersRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrdersRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrdersRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrdersRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrdersRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrdersRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrdersRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetOrdersRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, 
extensionRegistry); + } + + public static sample.rpc.GetOrdersRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.GetOrdersRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.GetOrdersRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetOrdersRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.GetOrdersRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.GetOrdersRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.GetOrdersRequest) + sample.rpc.GetOrdersRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetOrdersRequest.class, sample.rpc.GetOrdersRequest.Builder.class); + } + + // Construct using sample.rpc.GetOrdersRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + customerId_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.GetOrdersRequest getDefaultInstanceForType() { + return sample.rpc.GetOrdersRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.GetOrdersRequest build() { + sample.rpc.GetOrdersRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.GetOrdersRequest buildPartial() { + sample.rpc.GetOrdersRequest result = new sample.rpc.GetOrdersRequest(this); + 
if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.GetOrdersRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.customerId_ = customerId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.GetOrdersRequest) { + return mergeFrom((sample.rpc.GetOrdersRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.GetOrdersRequest other) { + if (other == sample.rpc.GetOrdersRequest.getDefaultInstance()) return this; + if (other.getCustomerId() != 0) { + setCustomerId(other.getCustomerId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + customerId_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private int customerId_ ; + /** + * int32 customer_id = 1; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + /** + * int32 customer_id = 1; + * @param value The customerId to set. + * @return This builder for chaining. + */ + public Builder setCustomerId(int value) { + + customerId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * int32 customer_id = 1; + * @return This builder for chaining. 
+ */ + public Builder clearCustomerId() { + bitField0_ = (bitField0_ & ~0x00000001); + customerId_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.GetOrdersRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.GetOrdersRequest) + private static final sample.rpc.GetOrdersRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.GetOrdersRequest(); + } + + public static sample.rpc.GetOrdersRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetOrdersRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.GetOrdersRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersRequestOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersRequestOrBuilder.java new file mode 100644 index 00000000..6a536778 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersRequestOrBuilder.java @@ -0,0 +1,15 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface GetOrdersRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.GetOrdersRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * int32 customer_id = 1; + * @return The customerId. + */ + int getCustomerId(); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersResponse.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersResponse.java new file mode 100644 index 00000000..e4b1b286 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersResponse.java @@ -0,0 +1,727 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.GetOrdersResponse} + */ +public final class GetOrdersResponse extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.GetOrdersResponse) + GetOrdersResponseOrBuilder { +private static final long serialVersionUID = 0L; + // Use GetOrdersResponse.newBuilder() to construct. + private GetOrdersResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GetOrdersResponse() { + order_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new GetOrdersResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetOrdersResponse.class, sample.rpc.GetOrdersResponse.Builder.class); + } + + public static final int ORDER_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private java.util.List order_; + /** + * repeated .rpc.Order order = 1; + */ + @java.lang.Override + public java.util.List getOrderList() { + return order_; + } + /** + * repeated .rpc.Order order = 1; + */ + @java.lang.Override + public java.util.List + getOrderOrBuilderList() { + return order_; + } + /** + * repeated .rpc.Order order = 1; + */ + @java.lang.Override + public int getOrderCount() { + return order_.size(); + } + /** + * repeated .rpc.Order order = 1; + */ + @java.lang.Override + public sample.rpc.Order getOrder(int index) { + return order_.get(index); + } + /** + * repeated .rpc.Order order = 1; + */ + @java.lang.Override + public sample.rpc.OrderOrBuilder getOrderOrBuilder( + int index) { + return order_.get(index); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < order_.size(); i++) { + output.writeMessage(1, order_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < order_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, order_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.GetOrdersResponse)) { + return super.equals(obj); + } + sample.rpc.GetOrdersResponse other = (sample.rpc.GetOrdersResponse) obj; + + if (!getOrderList() + .equals(other.getOrderList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return 
memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getOrderCount() > 0) { + hash = (37 * hash) + ORDER_FIELD_NUMBER; + hash = (53 * hash) + getOrderList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.GetOrdersResponse parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrdersResponse parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrdersResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrdersResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrdersResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.GetOrdersResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.GetOrdersResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetOrdersResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.GetOrdersResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.GetOrdersResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.GetOrdersResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.GetOrdersResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.GetOrdersResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + 
public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.GetOrdersResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.GetOrdersResponse) + sample.rpc.GetOrdersResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.GetOrdersResponse.class, sample.rpc.GetOrdersResponse.Builder.class); + } + + // Construct using sample.rpc.GetOrdersResponse.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (orderBuilder_ == null) { + order_ = java.util.Collections.emptyList(); + } else { + order_ = null; + orderBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_GetOrdersResponse_descriptor; + } + + @java.lang.Override + public sample.rpc.GetOrdersResponse getDefaultInstanceForType() { + return sample.rpc.GetOrdersResponse.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.GetOrdersResponse build() { + sample.rpc.GetOrdersResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.GetOrdersResponse buildPartial() { + sample.rpc.GetOrdersResponse result = new sample.rpc.GetOrdersResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(sample.rpc.GetOrdersResponse result) { + if (orderBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + order_ = java.util.Collections.unmodifiableList(order_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.order_ = order_; + } else { + result.order_ = orderBuilder_.build(); + } + } + + private void buildPartial0(sample.rpc.GetOrdersResponse result) { + int from_bitField0_ = bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.GetOrdersResponse) { + return mergeFrom((sample.rpc.GetOrdersResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.GetOrdersResponse other) { + if (other == sample.rpc.GetOrdersResponse.getDefaultInstance()) return this; + if (orderBuilder_ == null) { + if (!other.order_.isEmpty()) { + if (order_.isEmpty()) { + order_ = other.order_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureOrderIsMutable(); + order_.addAll(other.order_); + } + onChanged(); + } + } else { + 
if (!other.order_.isEmpty()) { + if (orderBuilder_.isEmpty()) { + orderBuilder_.dispose(); + orderBuilder_ = null; + order_ = other.order_; + bitField0_ = (bitField0_ & ~0x00000001); + orderBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getOrderFieldBuilder() : null; + } else { + orderBuilder_.addAllMessages(other.order_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + sample.rpc.Order m = + input.readMessage( + sample.rpc.Order.parser(), + extensionRegistry); + if (orderBuilder_ == null) { + ensureOrderIsMutable(); + order_.add(m); + } else { + orderBuilder_.addMessage(m); + } + break; + } // case 10 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.util.List order_ = + java.util.Collections.emptyList(); + private void ensureOrderIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + order_ = new java.util.ArrayList(order_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.Order, sample.rpc.Order.Builder, sample.rpc.OrderOrBuilder> orderBuilder_; + + /** + * repeated .rpc.Order order = 1; + */ + public java.util.List getOrderList() { + if (orderBuilder_ == null) { + return java.util.Collections.unmodifiableList(order_); + } else { + return orderBuilder_.getMessageList(); + } + } + /** + * repeated .rpc.Order order = 1; + */ + public int getOrderCount() { + if (orderBuilder_ == null) { + return order_.size(); + } else { + return orderBuilder_.getCount(); + } + } + /** + * repeated .rpc.Order order = 1; + */ + public sample.rpc.Order getOrder(int index) { + if (orderBuilder_ == null) { + return order_.get(index); + } else { + return orderBuilder_.getMessage(index); + } + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder setOrder( + int index, sample.rpc.Order value) { + if (orderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOrderIsMutable(); + order_.set(index, value); + onChanged(); + } else { + orderBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder setOrder( + int index, sample.rpc.Order.Builder builderForValue) { + if (orderBuilder_ == null) { + ensureOrderIsMutable(); + order_.set(index, builderForValue.build()); + onChanged(); + } else { + orderBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder addOrder(sample.rpc.Order value) { + if (orderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOrderIsMutable(); + order_.add(value); + onChanged(); + } else { + 
orderBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder addOrder( + int index, sample.rpc.Order value) { + if (orderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOrderIsMutable(); + order_.add(index, value); + onChanged(); + } else { + orderBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder addOrder( + sample.rpc.Order.Builder builderForValue) { + if (orderBuilder_ == null) { + ensureOrderIsMutable(); + order_.add(builderForValue.build()); + onChanged(); + } else { + orderBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder addOrder( + int index, sample.rpc.Order.Builder builderForValue) { + if (orderBuilder_ == null) { + ensureOrderIsMutable(); + order_.add(index, builderForValue.build()); + onChanged(); + } else { + orderBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder addAllOrder( + java.lang.Iterable values) { + if (orderBuilder_ == null) { + ensureOrderIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, order_); + onChanged(); + } else { + orderBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder clearOrder() { + if (orderBuilder_ == null) { + order_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + orderBuilder_.clear(); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public Builder removeOrder(int index) { + if (orderBuilder_ == null) { + ensureOrderIsMutable(); + order_.remove(index); + onChanged(); + } else { + orderBuilder_.remove(index); + } + return this; + } + /** + * repeated .rpc.Order order = 1; + */ + public sample.rpc.Order.Builder getOrderBuilder( + int index) { + return getOrderFieldBuilder().getBuilder(index); + } + /** + * repeated .rpc.Order order = 1; + */ + public sample.rpc.OrderOrBuilder getOrderOrBuilder( + int index) { + if (orderBuilder_ == null) { + return order_.get(index); } else { + return orderBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .rpc.Order order = 1; + */ + public java.util.List + getOrderOrBuilderList() { + if (orderBuilder_ != null) { + return orderBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(order_); + } + } + /** + * repeated .rpc.Order order = 1; + */ + public sample.rpc.Order.Builder addOrderBuilder() { + return getOrderFieldBuilder().addBuilder( + sample.rpc.Order.getDefaultInstance()); + } + /** + * repeated .rpc.Order order = 1; + */ + public sample.rpc.Order.Builder addOrderBuilder( + int index) { + return getOrderFieldBuilder().addBuilder( + index, sample.rpc.Order.getDefaultInstance()); + } + /** + * repeated .rpc.Order order = 1; + */ + public java.util.List + getOrderBuilderList() { + return getOrderFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.Order, sample.rpc.Order.Builder, sample.rpc.OrderOrBuilder> + getOrderFieldBuilder() { + if (orderBuilder_ == null) { + orderBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.Order, sample.rpc.Order.Builder, sample.rpc.OrderOrBuilder>( + order_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + order_ = 
null; + } + return orderBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.GetOrdersResponse) + } + + // @@protoc_insertion_point(class_scope:rpc.GetOrdersResponse) + private static final sample.rpc.GetOrdersResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.GetOrdersResponse(); + } + + public static sample.rpc.GetOrdersResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetOrdersResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.GetOrdersResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersResponseOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersResponseOrBuilder.java new file mode 100644 index 00000000..fff2fcfd --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/GetOrdersResponseOrBuilder.java @@ -0,0 +1,33 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: sample.proto + +package sample.rpc; + +public interface GetOrdersResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.GetOrdersResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * repeated .rpc.Order order = 1; + */ + java.util.List + getOrderList(); + /** + * repeated .rpc.Order order = 1; + */ + sample.rpc.Order getOrder(int index); + /** + * repeated .rpc.Order order = 1; + */ + int getOrderCount(); + /** + * repeated .rpc.Order order = 1; + */ + java.util.List + getOrderOrBuilderList(); + /** + * repeated .rpc.Order order = 1; + */ + sample.rpc.OrderOrBuilder getOrderOrBuilder( + int index); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/ItemOrder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/ItemOrder.java new file mode 100644 index 00000000..e23e2a4d --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/ItemOrder.java @@ -0,0 +1,505 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.ItemOrder} + */ +public final class ItemOrder extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.ItemOrder) + ItemOrderOrBuilder { +private static final long serialVersionUID = 0L; + // Use ItemOrder.newBuilder() to construct. + private ItemOrder(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ItemOrder() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new ItemOrder(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_ItemOrder_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_ItemOrder_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.ItemOrder.class, sample.rpc.ItemOrder.Builder.class); + } + + public static final int ITEM_ID_FIELD_NUMBER = 1; + private int itemId_ = 0; + /** + * int32 item_id = 1; + * @return The itemId. + */ + @java.lang.Override + public int getItemId() { + return itemId_; + } + + public static final int COUNT_FIELD_NUMBER = 2; + private int count_ = 0; + /** + * int32 count = 2; + * @return The count. 
+ */ + @java.lang.Override + public int getCount() { + return count_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (itemId_ != 0) { + output.writeInt32(1, itemId_); + } + if (count_ != 0) { + output.writeInt32(2, count_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (itemId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, itemId_); + } + if (count_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, count_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.ItemOrder)) { + return super.equals(obj); + } + sample.rpc.ItemOrder other = (sample.rpc.ItemOrder) obj; + + if (getItemId() + != other.getItemId()) return false; + if (getCount() + != other.getCount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ITEM_ID_FIELD_NUMBER; + hash = (53 * hash) + getItemId(); + hash = (37 * hash) + COUNT_FIELD_NUMBER; + hash = (53 * hash) + getCount(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.ItemOrder parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.ItemOrder parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.ItemOrder parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.ItemOrder parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.ItemOrder parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.ItemOrder parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.ItemOrder parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.ItemOrder parseFrom( + java.io.InputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.ItemOrder parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.ItemOrder parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.ItemOrder parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.ItemOrder parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.ItemOrder prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.ItemOrder} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.ItemOrder) + sample.rpc.ItemOrderOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_ItemOrder_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_ItemOrder_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.ItemOrder.class, sample.rpc.ItemOrder.Builder.class); + } + + // Construct using sample.rpc.ItemOrder.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + itemId_ = 0; + count_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_ItemOrder_descriptor; + } + + @java.lang.Override + public sample.rpc.ItemOrder getDefaultInstanceForType() { + return sample.rpc.ItemOrder.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.ItemOrder build() { + sample.rpc.ItemOrder result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.ItemOrder buildPartial() { + sample.rpc.ItemOrder 
result = new sample.rpc.ItemOrder(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.ItemOrder result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.itemId_ = itemId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.count_ = count_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.ItemOrder) { + return mergeFrom((sample.rpc.ItemOrder)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.ItemOrder other) { + if (other == sample.rpc.ItemOrder.getDefaultInstance()) return this; + if (other.getItemId() != 0) { + setItemId(other.getItemId()); + } + if (other.getCount() != 0) { + setCount(other.getCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + itemId_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 16: { + count_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private int itemId_ ; + /** + * int32 item_id = 1; + * @return The itemId. + */ + @java.lang.Override + public int getItemId() { + return itemId_; + } + /** + * int32 item_id = 1; + * @param value The itemId to set. + * @return This builder for chaining. + */ + public Builder setItemId(int value) { + + itemId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * int32 item_id = 1; + * @return This builder for chaining. + */ + public Builder clearItemId() { + bitField0_ = (bitField0_ & ~0x00000001); + itemId_ = 0; + onChanged(); + return this; + } + + private int count_ ; + /** + * int32 count = 2; + * @return The count. + */ + @java.lang.Override + public int getCount() { + return count_; + } + /** + * int32 count = 2; + * @param value The count to set. + * @return This builder for chaining. + */ + public Builder setCount(int value) { + + count_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * int32 count = 2; + * @return This builder for chaining. 
+ */ + public Builder clearCount() { + bitField0_ = (bitField0_ & ~0x00000002); + count_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.ItemOrder) + } + + // @@protoc_insertion_point(class_scope:rpc.ItemOrder) + private static final sample.rpc.ItemOrder DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.ItemOrder(); + } + + public static sample.rpc.ItemOrder getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ItemOrder parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.ItemOrder getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/ItemOrderOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/ItemOrderOrBuilder.java new file mode 100644 index 00000000..412c5349 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/ItemOrderOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface ItemOrderOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.ItemOrder) + com.google.protobuf.MessageOrBuilder { + + /** + * int32 item_id = 1; + * @return The itemId. + */ + int getItemId(); + + /** + * int32 count = 2; + * @return The count. + */ + int getCount(); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/Order.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/Order.java new file mode 100644 index 00000000..eee1c7e8 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/Order.java @@ -0,0 +1,1198 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.Order} + */ +public final class Order extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.Order) + OrderOrBuilder { +private static final long serialVersionUID = 0L; + // Use Order.newBuilder() to construct. + private Order(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private Order() { + orderId_ = ""; + customerName_ = ""; + statement_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new Order(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_Order_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_Order_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.Order.class, sample.rpc.Order.Builder.class); + } + + public static final int ORDER_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object orderId_ = ""; + /** + * string order_id = 1; + * @return The orderId. + */ + @java.lang.Override + public java.lang.String getOrderId() { + java.lang.Object ref = orderId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + orderId_ = s; + return s; + } + } + /** + * string order_id = 1; + * @return The bytes for orderId. + */ + @java.lang.Override + public com.google.protobuf.ByteString + getOrderIdBytes() { + java.lang.Object ref = orderId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + orderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TIMESTAMP_FIELD_NUMBER = 2; + private long timestamp_ = 0L; + /** + * int64 timestamp = 2; + * @return The timestamp. + */ + @java.lang.Override + public long getTimestamp() { + return timestamp_; + } + + public static final int CUSTOMER_ID_FIELD_NUMBER = 3; + private int customerId_ = 0; + /** + * int32 customer_id = 3; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + + public static final int CUSTOMER_NAME_FIELD_NUMBER = 4; + @SuppressWarnings("serial") + private volatile java.lang.Object customerName_ = ""; + /** + * string customer_name = 4; + * @return The customerName. + */ + @java.lang.Override + public java.lang.String getCustomerName() { + java.lang.Object ref = customerName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + customerName_ = s; + return s; + } + } + /** + * string customer_name = 4; + * @return The bytes for customerName. 
+ */ + @java.lang.Override + public com.google.protobuf.ByteString + getCustomerNameBytes() { + java.lang.Object ref = customerName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + customerName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int STATEMENT_FIELD_NUMBER = 5; + @SuppressWarnings("serial") + private java.util.List statement_; + /** + * repeated .rpc.Statement statement = 5; + */ + @java.lang.Override + public java.util.List getStatementList() { + return statement_; + } + /** + * repeated .rpc.Statement statement = 5; + */ + @java.lang.Override + public java.util.List + getStatementOrBuilderList() { + return statement_; + } + /** + * repeated .rpc.Statement statement = 5; + */ + @java.lang.Override + public int getStatementCount() { + return statement_.size(); + } + /** + * repeated .rpc.Statement statement = 5; + */ + @java.lang.Override + public sample.rpc.Statement getStatement(int index) { + return statement_.get(index); + } + /** + * repeated .rpc.Statement statement = 5; + */ + @java.lang.Override + public sample.rpc.StatementOrBuilder getStatementOrBuilder( + int index) { + return statement_.get(index); + } + + public static final int TOTAL_FIELD_NUMBER = 6; + private int total_ = 0; + /** + * int32 total = 6; + * @return The total. + */ + @java.lang.Override + public int getTotal() { + return total_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(orderId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, orderId_); + } + if (timestamp_ != 0L) { + output.writeInt64(2, timestamp_); + } + if (customerId_ != 0) { + output.writeInt32(3, customerId_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(customerName_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, customerName_); + } + for (int i = 0; i < statement_.size(); i++) { + output.writeMessage(5, statement_.get(i)); + } + if (total_ != 0) { + output.writeInt32(6, total_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(orderId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, orderId_); + } + if (timestamp_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, timestamp_); + } + if (customerId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(3, customerId_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(customerName_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, customerName_); + } + for (int i = 0; i < statement_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, statement_.get(i)); + } + if (total_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(6, total_); + } + size += getUnknownFields().getSerializedSize(); + 
memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.Order)) { + return super.equals(obj); + } + sample.rpc.Order other = (sample.rpc.Order) obj; + + if (!getOrderId() + .equals(other.getOrderId())) return false; + if (getTimestamp() + != other.getTimestamp()) return false; + if (getCustomerId() + != other.getCustomerId()) return false; + if (!getCustomerName() + .equals(other.getCustomerName())) return false; + if (!getStatementList() + .equals(other.getStatementList())) return false; + if (getTotal() + != other.getTotal()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ORDER_ID_FIELD_NUMBER; + hash = (53 * hash) + getOrderId().hashCode(); + hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getTimestamp()); + hash = (37 * hash) + CUSTOMER_ID_FIELD_NUMBER; + hash = (53 * hash) + getCustomerId(); + hash = (37 * hash) + CUSTOMER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getCustomerName().hashCode(); + if (getStatementCount() > 0) { + hash = (37 * hash) + STATEMENT_FIELD_NUMBER; + hash = (53 * hash) + getStatementList().hashCode(); + } + hash = (37 * hash) + TOTAL_FIELD_NUMBER; + hash = (53 * hash) + getTotal(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.Order parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.Order parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.Order parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.Order parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.Order parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.Order parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.Order parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.Order parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.Order parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.Order parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.Order parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.Order parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.Order prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.Order} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.Order) + sample.rpc.OrderOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_Order_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_Order_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.Order.class, sample.rpc.Order.Builder.class); + } + + // Construct using sample.rpc.Order.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + orderId_ = ""; + timestamp_ = 0L; + customerId_ = 0; + customerName_ = ""; + if (statementBuilder_ == null) { + statement_ = java.util.Collections.emptyList(); + } else { + statement_ = null; + statementBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + total_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_Order_descriptor; + } + + @java.lang.Override + public sample.rpc.Order getDefaultInstanceForType() { + return sample.rpc.Order.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.Order build() { + sample.rpc.Order result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.Order buildPartial() { + sample.rpc.Order result = new sample.rpc.Order(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return 
result; + } + + private void buildPartialRepeatedFields(sample.rpc.Order result) { + if (statementBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0)) { + statement_ = java.util.Collections.unmodifiableList(statement_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.statement_ = statement_; + } else { + result.statement_ = statementBuilder_.build(); + } + } + + private void buildPartial0(sample.rpc.Order result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.orderId_ = orderId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.timestamp_ = timestamp_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.customerId_ = customerId_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.customerName_ = customerName_; + } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.total_ = total_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.Order) { + return mergeFrom((sample.rpc.Order)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.Order other) { + if (other == sample.rpc.Order.getDefaultInstance()) return this; + if (!other.getOrderId().isEmpty()) { + orderId_ = other.orderId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getTimestamp() != 0L) { + setTimestamp(other.getTimestamp()); + } + if (other.getCustomerId() != 0) { + setCustomerId(other.getCustomerId()); + } + if (!other.getCustomerName().isEmpty()) { + customerName_ = other.customerName_; + bitField0_ |= 0x00000008; + onChanged(); + } + if (statementBuilder_ == null) { + if (!other.statement_.isEmpty()) { + if (statement_.isEmpty()) { + statement_ = other.statement_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureStatementIsMutable(); + statement_.addAll(other.statement_); + } + onChanged(); + } + } else { + if (!other.statement_.isEmpty()) { + if (statementBuilder_.isEmpty()) { + statementBuilder_.dispose(); + statementBuilder_ = null; + statement_ = other.statement_; + bitField0_ = (bitField0_ & ~0x00000010); + statementBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
+ getStatementFieldBuilder() : null; + } else { + statementBuilder_.addAllMessages(other.statement_); + } + } + } + if (other.getTotal() != 0) { + setTotal(other.getTotal()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + orderId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: { + timestamp_ = input.readInt64(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: { + customerId_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 34: { + customerName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 42: { + sample.rpc.Statement m = + input.readMessage( + sample.rpc.Statement.parser(), + extensionRegistry); + if (statementBuilder_ == null) { + ensureStatementIsMutable(); + statement_.add(m); + } else { + statementBuilder_.addMessage(m); + } + break; + } // case 42 + case 48: { + total_ = input.readInt32(); + bitField0_ |= 0x00000020; + break; + } // case 48 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object orderId_ = ""; + /** + * string order_id = 1; + * @return The orderId. + */ + public java.lang.String getOrderId() { + java.lang.Object ref = orderId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + orderId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string order_id = 1; + * @return The bytes for orderId. + */ + public com.google.protobuf.ByteString + getOrderIdBytes() { + java.lang.Object ref = orderId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + orderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string order_id = 1; + * @param value The orderId to set. + * @return This builder for chaining. + */ + public Builder setOrderId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + orderId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * string order_id = 1; + * @return This builder for chaining. + */ + public Builder clearOrderId() { + orderId_ = getDefaultInstance().getOrderId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * string order_id = 1; + * @param value The bytes for orderId to set. + * @return This builder for chaining. 
+ */ + public Builder setOrderIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + orderId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private long timestamp_ ; + /** + * int64 timestamp = 2; + * @return The timestamp. + */ + @java.lang.Override + public long getTimestamp() { + return timestamp_; + } + /** + * int64 timestamp = 2; + * @param value The timestamp to set. + * @return This builder for chaining. + */ + public Builder setTimestamp(long value) { + + timestamp_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * int64 timestamp = 2; + * @return This builder for chaining. + */ + public Builder clearTimestamp() { + bitField0_ = (bitField0_ & ~0x00000002); + timestamp_ = 0L; + onChanged(); + return this; + } + + private int customerId_ ; + /** + * int32 customer_id = 3; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + /** + * int32 customer_id = 3; + * @param value The customerId to set. + * @return This builder for chaining. + */ + public Builder setCustomerId(int value) { + + customerId_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * int32 customer_id = 3; + * @return This builder for chaining. + */ + public Builder clearCustomerId() { + bitField0_ = (bitField0_ & ~0x00000004); + customerId_ = 0; + onChanged(); + return this; + } + + private java.lang.Object customerName_ = ""; + /** + * string customer_name = 4; + * @return The customerName. + */ + public java.lang.String getCustomerName() { + java.lang.Object ref = customerName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + customerName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string customer_name = 4; + * @return The bytes for customerName. + */ + public com.google.protobuf.ByteString + getCustomerNameBytes() { + java.lang.Object ref = customerName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + customerName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string customer_name = 4; + * @param value The customerName to set. + * @return This builder for chaining. + */ + public Builder setCustomerName( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + customerName_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * string customer_name = 4; + * @return This builder for chaining. + */ + public Builder clearCustomerName() { + customerName_ = getDefaultInstance().getCustomerName(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + /** + * string customer_name = 4; + * @param value The bytes for customerName to set. + * @return This builder for chaining. 
+ */ + public Builder setCustomerNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + customerName_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + private java.util.List statement_ = + java.util.Collections.emptyList(); + private void ensureStatementIsMutable() { + if (!((bitField0_ & 0x00000010) != 0)) { + statement_ = new java.util.ArrayList(statement_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.Statement, sample.rpc.Statement.Builder, sample.rpc.StatementOrBuilder> statementBuilder_; + + /** + * repeated .rpc.Statement statement = 5; + */ + public java.util.List getStatementList() { + if (statementBuilder_ == null) { + return java.util.Collections.unmodifiableList(statement_); + } else { + return statementBuilder_.getMessageList(); + } + } + /** + * repeated .rpc.Statement statement = 5; + */ + public int getStatementCount() { + if (statementBuilder_ == null) { + return statement_.size(); + } else { + return statementBuilder_.getCount(); + } + } + /** + * repeated .rpc.Statement statement = 5; + */ + public sample.rpc.Statement getStatement(int index) { + if (statementBuilder_ == null) { + return statement_.get(index); + } else { + return statementBuilder_.getMessage(index); + } + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder setStatement( + int index, sample.rpc.Statement value) { + if (statementBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatementIsMutable(); + statement_.set(index, value); + onChanged(); + } else { + statementBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder setStatement( + int index, sample.rpc.Statement.Builder builderForValue) { + if (statementBuilder_ == null) { + ensureStatementIsMutable(); + statement_.set(index, builderForValue.build()); + onChanged(); + } else { + statementBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder addStatement(sample.rpc.Statement value) { + if (statementBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatementIsMutable(); + statement_.add(value); + onChanged(); + } else { + statementBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder addStatement( + int index, sample.rpc.Statement value) { + if (statementBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStatementIsMutable(); + statement_.add(index, value); + onChanged(); + } else { + statementBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder addStatement( + sample.rpc.Statement.Builder builderForValue) { + if (statementBuilder_ == null) { + ensureStatementIsMutable(); + statement_.add(builderForValue.build()); + onChanged(); + } else { + statementBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder addStatement( + int index, sample.rpc.Statement.Builder builderForValue) { + if (statementBuilder_ == null) { + ensureStatementIsMutable(); + statement_.add(index, builderForValue.build()); + onChanged(); + } else { + 
statementBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder addAllStatement( + java.lang.Iterable values) { + if (statementBuilder_ == null) { + ensureStatementIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, statement_); + onChanged(); + } else { + statementBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder clearStatement() { + if (statementBuilder_ == null) { + statement_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + statementBuilder_.clear(); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public Builder removeStatement(int index) { + if (statementBuilder_ == null) { + ensureStatementIsMutable(); + statement_.remove(index); + onChanged(); + } else { + statementBuilder_.remove(index); + } + return this; + } + /** + * repeated .rpc.Statement statement = 5; + */ + public sample.rpc.Statement.Builder getStatementBuilder( + int index) { + return getStatementFieldBuilder().getBuilder(index); + } + /** + * repeated .rpc.Statement statement = 5; + */ + public sample.rpc.StatementOrBuilder getStatementOrBuilder( + int index) { + if (statementBuilder_ == null) { + return statement_.get(index); } else { + return statementBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .rpc.Statement statement = 5; + */ + public java.util.List + getStatementOrBuilderList() { + if (statementBuilder_ != null) { + return statementBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(statement_); + } + } + /** + * repeated .rpc.Statement statement = 5; + */ + public sample.rpc.Statement.Builder addStatementBuilder() { + return getStatementFieldBuilder().addBuilder( + sample.rpc.Statement.getDefaultInstance()); + } + /** + * repeated .rpc.Statement statement = 5; + */ + public sample.rpc.Statement.Builder addStatementBuilder( + int index) { + return getStatementFieldBuilder().addBuilder( + index, sample.rpc.Statement.getDefaultInstance()); + } + /** + * repeated .rpc.Statement statement = 5; + */ + public java.util.List + getStatementBuilderList() { + return getStatementFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.Statement, sample.rpc.Statement.Builder, sample.rpc.StatementOrBuilder> + getStatementFieldBuilder() { + if (statementBuilder_ == null) { + statementBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.Statement, sample.rpc.Statement.Builder, sample.rpc.StatementOrBuilder>( + statement_, + ((bitField0_ & 0x00000010) != 0), + getParentForChildren(), + isClean()); + statement_ = null; + } + return statementBuilder_; + } + + private int total_ ; + /** + * int32 total = 6; + * @return The total. + */ + @java.lang.Override + public int getTotal() { + return total_; + } + /** + * int32 total = 6; + * @param value The total to set. + * @return This builder for chaining. + */ + public Builder setTotal(int value) { + + total_ = value; + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + /** + * int32 total = 6; + * @return This builder for chaining. 
+ */ + public Builder clearTotal() { + bitField0_ = (bitField0_ & ~0x00000020); + total_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.Order) + } + + // @@protoc_insertion_point(class_scope:rpc.Order) + private static final sample.rpc.Order DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.Order(); + } + + public static sample.rpc.Order getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Order parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.Order getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/OrderOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/OrderOrBuilder.java new file mode 100644 index 00000000..d2f9598e --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/OrderOrBuilder.java @@ -0,0 +1,75 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface OrderOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.Order) + com.google.protobuf.MessageOrBuilder { + + /** + * string order_id = 1; + * @return The orderId. + */ + java.lang.String getOrderId(); + /** + * string order_id = 1; + * @return The bytes for orderId. + */ + com.google.protobuf.ByteString + getOrderIdBytes(); + + /** + * int64 timestamp = 2; + * @return The timestamp. + */ + long getTimestamp(); + + /** + * int32 customer_id = 3; + * @return The customerId. + */ + int getCustomerId(); + + /** + * string customer_name = 4; + * @return The customerName. + */ + java.lang.String getCustomerName(); + /** + * string customer_name = 4; + * @return The bytes for customerName. 
+ */ + com.google.protobuf.ByteString + getCustomerNameBytes(); + + /** + * repeated .rpc.Statement statement = 5; + */ + java.util.List + getStatementList(); + /** + * repeated .rpc.Statement statement = 5; + */ + sample.rpc.Statement getStatement(int index); + /** + * repeated .rpc.Statement statement = 5; + */ + int getStatementCount(); + /** + * repeated .rpc.Statement statement = 5; + */ + java.util.List + getStatementOrBuilderList(); + /** + * repeated .rpc.Statement statement = 5; + */ + sample.rpc.StatementOrBuilder getStatementOrBuilder( + int index); + + /** + * int32 total = 6; + * @return The total. + */ + int getTotal(); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PaymentRequest.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PaymentRequest.java new file mode 100644 index 00000000..ccb770ba --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PaymentRequest.java @@ -0,0 +1,641 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.PaymentRequest} + */ +public final class PaymentRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.PaymentRequest) + PaymentRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use PaymentRequest.newBuilder() to construct. + private PaymentRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private PaymentRequest() { + transactionId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new PaymentRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_PaymentRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_PaymentRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.PaymentRequest.class, sample.rpc.PaymentRequest.Builder.class); + } + + public static final int TRANSACTION_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + @java.lang.Override + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + @java.lang.Override + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CUSTOMER_ID_FIELD_NUMBER = 2; + private int customerId_ = 0; + /** + * int32 customer_id = 2; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + + public static final int AMOUNT_FIELD_NUMBER = 3; + private int amount_ = 0; + /** + * int32 amount = 3; + * @return The amount. + */ + @java.lang.Override + public int getAmount() { + return amount_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, transactionId_); + } + if (customerId_ != 0) { + output.writeInt32(2, customerId_); + } + if (amount_ != 0) { + output.writeInt32(3, amount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, transactionId_); + } + if (customerId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, customerId_); + } + if (amount_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(3, amount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.PaymentRequest)) { + return super.equals(obj); + } + sample.rpc.PaymentRequest other = (sample.rpc.PaymentRequest) obj; + + if (!getTransactionId() + .equals(other.getTransactionId())) return false; + if (getCustomerId() + != other.getCustomerId()) return false; + if (getAmount() + != other.getAmount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getTransactionId().hashCode(); + hash = (37 * hash) + CUSTOMER_ID_FIELD_NUMBER; + hash = (53 * hash) + getCustomerId(); + hash = (37 * hash) + AMOUNT_FIELD_NUMBER; + hash = (53 * hash) + getAmount(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.PaymentRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PaymentRequest parseFrom( + 
java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PaymentRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PaymentRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PaymentRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PaymentRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PaymentRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.PaymentRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.PaymentRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.PaymentRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.PaymentRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.PaymentRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.PaymentRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.PaymentRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.PaymentRequest) + sample.rpc.PaymentRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_PaymentRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_PaymentRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.PaymentRequest.class, sample.rpc.PaymentRequest.Builder.class); + } + + // Construct using sample.rpc.PaymentRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + transactionId_ = ""; + customerId_ = 0; + amount_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_PaymentRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.PaymentRequest getDefaultInstanceForType() { + return sample.rpc.PaymentRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.PaymentRequest build() { + sample.rpc.PaymentRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.PaymentRequest buildPartial() { + sample.rpc.PaymentRequest result = new sample.rpc.PaymentRequest(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.PaymentRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.transactionId_ = transactionId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.customerId_ = customerId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.amount_ = amount_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.PaymentRequest) { + return mergeFrom((sample.rpc.PaymentRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.PaymentRequest other) { + if (other == sample.rpc.PaymentRequest.getDefaultInstance()) return this; + if (!other.getTransactionId().isEmpty()) { + transactionId_ = other.transactionId_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getCustomerId() != 0) { + setCustomerId(other.getCustomerId()); + } + if (other.getAmount() != 0) { + setAmount(other.getAmount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + transactionId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: { + customerId_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 24: { + amount_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. + */ + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string transaction_id = 1; + * @param value The transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @return This builder for chaining. + */ + public Builder clearTransactionId() { + transactionId_ = getDefaultInstance().getTransactionId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @param value The bytes for transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int customerId_ ; + /** + * int32 customer_id = 2; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + /** + * int32 customer_id = 2; + * @param value The customerId to set. + * @return This builder for chaining. + */ + public Builder setCustomerId(int value) { + + customerId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * int32 customer_id = 2; + * @return This builder for chaining. + */ + public Builder clearCustomerId() { + bitField0_ = (bitField0_ & ~0x00000002); + customerId_ = 0; + onChanged(); + return this; + } + + private int amount_ ; + /** + * int32 amount = 3; + * @return The amount. 
+ */ + @java.lang.Override + public int getAmount() { + return amount_; + } + /** + * int32 amount = 3; + * @param value The amount to set. + * @return This builder for chaining. + */ + public Builder setAmount(int value) { + + amount_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * int32 amount = 3; + * @return This builder for chaining. + */ + public Builder clearAmount() { + bitField0_ = (bitField0_ & ~0x00000004); + amount_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.PaymentRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.PaymentRequest) + private static final sample.rpc.PaymentRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.PaymentRequest(); + } + + public static sample.rpc.PaymentRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PaymentRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.PaymentRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PaymentRequestOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PaymentRequestOrBuilder.java new file mode 100644 index 00000000..bee10612 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PaymentRequestOrBuilder.java @@ -0,0 +1,33 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface PaymentRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.PaymentRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * string transaction_id = 1; + * @return The transactionId. + */ + java.lang.String getTransactionId(); + /** + * string transaction_id = 1; + * @return The bytes for transactionId. + */ + com.google.protobuf.ByteString + getTransactionIdBytes(); + + /** + * int32 customer_id = 2; + * @return The customerId. 
+ */ + int getCustomerId(); + + /** + * int32 amount = 3; + * @return The amount. + */ + int getAmount(); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderRequest.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderRequest.java new file mode 100644 index 00000000..88754013 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderRequest.java @@ -0,0 +1,793 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.PlaceOrderRequest} + */ +public final class PlaceOrderRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.PlaceOrderRequest) + PlaceOrderRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use PlaceOrderRequest.newBuilder() to construct. + private PlaceOrderRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private PlaceOrderRequest() { + itemOrder_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new PlaceOrderRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.PlaceOrderRequest.class, sample.rpc.PlaceOrderRequest.Builder.class); + } + + public static final int CUSTOMER_ID_FIELD_NUMBER = 1; + private int customerId_ = 0; + /** + * int32 customer_id = 1; + * @return The customerId. 
+ */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + + public static final int ITEM_ORDER_FIELD_NUMBER = 2; + @SuppressWarnings("serial") + private java.util.List itemOrder_; + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + @java.lang.Override + public java.util.List getItemOrderList() { + return itemOrder_; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + @java.lang.Override + public java.util.List + getItemOrderOrBuilderList() { + return itemOrder_; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + @java.lang.Override + public int getItemOrderCount() { + return itemOrder_.size(); + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + @java.lang.Override + public sample.rpc.ItemOrder getItemOrder(int index) { + return itemOrder_.get(index); + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + @java.lang.Override + public sample.rpc.ItemOrderOrBuilder getItemOrderOrBuilder( + int index) { + return itemOrder_.get(index); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (customerId_ != 0) { + output.writeInt32(1, customerId_); + } + for (int i = 0; i < itemOrder_.size(); i++) { + output.writeMessage(2, itemOrder_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (customerId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, customerId_); + } + for (int i = 0; i < itemOrder_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, itemOrder_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.PlaceOrderRequest)) { + return super.equals(obj); + } + sample.rpc.PlaceOrderRequest other = (sample.rpc.PlaceOrderRequest) obj; + + if (getCustomerId() + != other.getCustomerId()) return false; + if (!getItemOrderList() + .equals(other.getItemOrderList())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CUSTOMER_ID_FIELD_NUMBER; + hash = (53 * hash) + getCustomerId(); + if (getItemOrderCount() > 0) { + hash = (37 * hash) + ITEM_ORDER_FIELD_NUMBER; + hash = (53 * hash) + getItemOrderList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.PlaceOrderRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PlaceOrderRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PlaceOrderRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PlaceOrderRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PlaceOrderRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PlaceOrderRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PlaceOrderRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.PlaceOrderRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.PlaceOrderRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.PlaceOrderRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.PlaceOrderRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.PlaceOrderRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.PlaceOrderRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.PlaceOrderRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.PlaceOrderRequest) + sample.rpc.PlaceOrderRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.PlaceOrderRequest.class, sample.rpc.PlaceOrderRequest.Builder.class); + } + + // Construct using sample.rpc.PlaceOrderRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + customerId_ = 0; + if (itemOrderBuilder_ == null) { + itemOrder_ = java.util.Collections.emptyList(); + } else { + itemOrder_ = null; + itemOrderBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.PlaceOrderRequest getDefaultInstanceForType() { + return sample.rpc.PlaceOrderRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.PlaceOrderRequest build() { + sample.rpc.PlaceOrderRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.PlaceOrderRequest buildPartial() { + sample.rpc.PlaceOrderRequest result = new sample.rpc.PlaceOrderRequest(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields(sample.rpc.PlaceOrderRequest result) { + if (itemOrderBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0)) { + itemOrder_ = java.util.Collections.unmodifiableList(itemOrder_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.itemOrder_ = itemOrder_; + } else { + result.itemOrder_ = itemOrderBuilder_.build(); + } + } + + private void buildPartial0(sample.rpc.PlaceOrderRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.customerId_ = customerId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.PlaceOrderRequest) { + return mergeFrom((sample.rpc.PlaceOrderRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.PlaceOrderRequest other) { + if (other == sample.rpc.PlaceOrderRequest.getDefaultInstance()) return this; + if (other.getCustomerId() != 0) { + setCustomerId(other.getCustomerId()); + } + if (itemOrderBuilder_ == null) { + if (!other.itemOrder_.isEmpty()) { + if (itemOrder_.isEmpty()) 
{ + itemOrder_ = other.itemOrder_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureItemOrderIsMutable(); + itemOrder_.addAll(other.itemOrder_); + } + onChanged(); + } + } else { + if (!other.itemOrder_.isEmpty()) { + if (itemOrderBuilder_.isEmpty()) { + itemOrderBuilder_.dispose(); + itemOrderBuilder_ = null; + itemOrder_ = other.itemOrder_; + bitField0_ = (bitField0_ & ~0x00000002); + itemOrderBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getItemOrderFieldBuilder() : null; + } else { + itemOrderBuilder_.addAllMessages(other.itemOrder_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + customerId_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: { + sample.rpc.ItemOrder m = + input.readMessage( + sample.rpc.ItemOrder.parser(), + extensionRegistry); + if (itemOrderBuilder_ == null) { + ensureItemOrderIsMutable(); + itemOrder_.add(m); + } else { + itemOrderBuilder_.addMessage(m); + } + break; + } // case 18 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private int customerId_ ; + /** + * int32 customer_id = 1; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + /** + * int32 customer_id = 1; + * @param value The customerId to set. + * @return This builder for chaining. + */ + public Builder setCustomerId(int value) { + + customerId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * int32 customer_id = 1; + * @return This builder for chaining. 
+ */ + public Builder clearCustomerId() { + bitField0_ = (bitField0_ & ~0x00000001); + customerId_ = 0; + onChanged(); + return this; + } + + private java.util.List itemOrder_ = + java.util.Collections.emptyList(); + private void ensureItemOrderIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + itemOrder_ = new java.util.ArrayList(itemOrder_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.ItemOrder, sample.rpc.ItemOrder.Builder, sample.rpc.ItemOrderOrBuilder> itemOrderBuilder_; + + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public java.util.List getItemOrderList() { + if (itemOrderBuilder_ == null) { + return java.util.Collections.unmodifiableList(itemOrder_); + } else { + return itemOrderBuilder_.getMessageList(); + } + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public int getItemOrderCount() { + if (itemOrderBuilder_ == null) { + return itemOrder_.size(); + } else { + return itemOrderBuilder_.getCount(); + } + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public sample.rpc.ItemOrder getItemOrder(int index) { + if (itemOrderBuilder_ == null) { + return itemOrder_.get(index); + } else { + return itemOrderBuilder_.getMessage(index); + } + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public Builder setItemOrder( + int index, sample.rpc.ItemOrder value) { + if (itemOrderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureItemOrderIsMutable(); + itemOrder_.set(index, value); + onChanged(); + } else { + itemOrderBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public Builder setItemOrder( + int index, sample.rpc.ItemOrder.Builder builderForValue) { + if (itemOrderBuilder_ == null) { + ensureItemOrderIsMutable(); + itemOrder_.set(index, builderForValue.build()); + onChanged(); + } else { + itemOrderBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public Builder addItemOrder(sample.rpc.ItemOrder value) { + if (itemOrderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureItemOrderIsMutable(); + itemOrder_.add(value); + onChanged(); + } else { + itemOrderBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public Builder addItemOrder( + int index, sample.rpc.ItemOrder value) { + if (itemOrderBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureItemOrderIsMutable(); + itemOrder_.add(index, value); + onChanged(); + } else { + itemOrderBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public Builder addItemOrder( + sample.rpc.ItemOrder.Builder builderForValue) { + if (itemOrderBuilder_ == null) { + ensureItemOrderIsMutable(); + itemOrder_.add(builderForValue.build()); + onChanged(); + } else { + itemOrderBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public Builder addItemOrder( + int index, sample.rpc.ItemOrder.Builder builderForValue) { + if (itemOrderBuilder_ == null) { + ensureItemOrderIsMutable(); + itemOrder_.add(index, builderForValue.build()); + onChanged(); + } else { + itemOrderBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public 
Builder addAllItemOrder( + java.lang.Iterable values) { + if (itemOrderBuilder_ == null) { + ensureItemOrderIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, itemOrder_); + onChanged(); + } else { + itemOrderBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public Builder clearItemOrder() { + if (itemOrderBuilder_ == null) { + itemOrder_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + itemOrderBuilder_.clear(); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public Builder removeItemOrder(int index) { + if (itemOrderBuilder_ == null) { + ensureItemOrderIsMutable(); + itemOrder_.remove(index); + onChanged(); + } else { + itemOrderBuilder_.remove(index); + } + return this; + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public sample.rpc.ItemOrder.Builder getItemOrderBuilder( + int index) { + return getItemOrderFieldBuilder().getBuilder(index); + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public sample.rpc.ItemOrderOrBuilder getItemOrderOrBuilder( + int index) { + if (itemOrderBuilder_ == null) { + return itemOrder_.get(index); } else { + return itemOrderBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public java.util.List + getItemOrderOrBuilderList() { + if (itemOrderBuilder_ != null) { + return itemOrderBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(itemOrder_); + } + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public sample.rpc.ItemOrder.Builder addItemOrderBuilder() { + return getItemOrderFieldBuilder().addBuilder( + sample.rpc.ItemOrder.getDefaultInstance()); + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public sample.rpc.ItemOrder.Builder addItemOrderBuilder( + int index) { + return getItemOrderFieldBuilder().addBuilder( + index, sample.rpc.ItemOrder.getDefaultInstance()); + } + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + public java.util.List + getItemOrderBuilderList() { + return getItemOrderFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.ItemOrder, sample.rpc.ItemOrder.Builder, sample.rpc.ItemOrderOrBuilder> + getItemOrderFieldBuilder() { + if (itemOrderBuilder_ == null) { + itemOrderBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + sample.rpc.ItemOrder, sample.rpc.ItemOrder.Builder, sample.rpc.ItemOrderOrBuilder>( + itemOrder_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); + itemOrder_ = null; + } + return itemOrderBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.PlaceOrderRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.PlaceOrderRequest) + private static final sample.rpc.PlaceOrderRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.PlaceOrderRequest(); + } + + public static sample.rpc.PlaceOrderRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new 
com.google.protobuf.AbstractParser() { + @java.lang.Override + public PlaceOrderRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.PlaceOrderRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderRequestOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderRequestOrBuilder.java new file mode 100644 index 00000000..7f996bbf --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderRequestOrBuilder.java @@ -0,0 +1,39 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface PlaceOrderRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.PlaceOrderRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * int32 customer_id = 1; + * @return The customerId. + */ + int getCustomerId(); + + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + java.util.List + getItemOrderList(); + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + sample.rpc.ItemOrder getItemOrder(int index); + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + int getItemOrderCount(); + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + java.util.List + getItemOrderOrBuilderList(); + /** + * repeated .rpc.ItemOrder item_order = 2; + */ + sample.rpc.ItemOrderOrBuilder getItemOrderOrBuilder( + int index); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderResponse.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderResponse.java new file mode 100644 index 00000000..455445ad --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderResponse.java @@ -0,0 +1,509 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.PlaceOrderResponse} + */ +public final class PlaceOrderResponse extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.PlaceOrderResponse) + PlaceOrderResponseOrBuilder { +private static final long serialVersionUID = 0L; + // Use PlaceOrderResponse.newBuilder() to construct. 
+ private PlaceOrderResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private PlaceOrderResponse() { + orderId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new PlaceOrderResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.PlaceOrderResponse.class, sample.rpc.PlaceOrderResponse.Builder.class); + } + + public static final int ORDER_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object orderId_ = ""; + /** + * string order_id = 1; + * @return The orderId. + */ + @java.lang.Override + public java.lang.String getOrderId() { + java.lang.Object ref = orderId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + orderId_ = s; + return s; + } + } + /** + * string order_id = 1; + * @return The bytes for orderId. + */ + @java.lang.Override + public com.google.protobuf.ByteString + getOrderIdBytes() { + java.lang.Object ref = orderId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + orderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(orderId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, orderId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(orderId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, orderId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.PlaceOrderResponse)) { + return super.equals(obj); + } + sample.rpc.PlaceOrderResponse other = (sample.rpc.PlaceOrderResponse) obj; + + if (!getOrderId() + .equals(other.getOrderId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ORDER_ID_FIELD_NUMBER; + hash = (53 * hash) + getOrderId().hashCode(); + hash = (29 * hash) + 
getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.PlaceOrderResponse parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PlaceOrderResponse parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PlaceOrderResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PlaceOrderResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PlaceOrderResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PlaceOrderResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PlaceOrderResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.PlaceOrderResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.PlaceOrderResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.PlaceOrderResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.PlaceOrderResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.PlaceOrderResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.PlaceOrderResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.PlaceOrderResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.PlaceOrderResponse) + sample.rpc.PlaceOrderResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.PlaceOrderResponse.class, sample.rpc.PlaceOrderResponse.Builder.class); + } + + // Construct using sample.rpc.PlaceOrderResponse.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + orderId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_PlaceOrderResponse_descriptor; + } + + @java.lang.Override + public sample.rpc.PlaceOrderResponse getDefaultInstanceForType() { + return sample.rpc.PlaceOrderResponse.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.PlaceOrderResponse build() { + sample.rpc.PlaceOrderResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.PlaceOrderResponse buildPartial() { + sample.rpc.PlaceOrderResponse result = new sample.rpc.PlaceOrderResponse(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.PlaceOrderResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.orderId_ = orderId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.PlaceOrderResponse) { + return mergeFrom((sample.rpc.PlaceOrderResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.PlaceOrderResponse other) { + if (other == sample.rpc.PlaceOrderResponse.getDefaultInstance()) return this; + if (!other.getOrderId().isEmpty()) { + orderId_ = other.orderId_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + orderId_ = input.readStringRequireUtf8(); + bitField0_ |= 
0x00000001; + break; + } // case 10 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object orderId_ = ""; + /** + * string order_id = 1; + * @return The orderId. + */ + public java.lang.String getOrderId() { + java.lang.Object ref = orderId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + orderId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string order_id = 1; + * @return The bytes for orderId. + */ + public com.google.protobuf.ByteString + getOrderIdBytes() { + java.lang.Object ref = orderId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + orderId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string order_id = 1; + * @param value The orderId to set. + * @return This builder for chaining. + */ + public Builder setOrderId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + orderId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * string order_id = 1; + * @return This builder for chaining. + */ + public Builder clearOrderId() { + orderId_ = getDefaultInstance().getOrderId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * string order_id = 1; + * @param value The bytes for orderId to set. + * @return This builder for chaining. 
+ */ + public Builder setOrderIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + orderId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.PlaceOrderResponse) + } + + // @@protoc_insertion_point(class_scope:rpc.PlaceOrderResponse) + private static final sample.rpc.PlaceOrderResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.PlaceOrderResponse(); + } + + public static sample.rpc.PlaceOrderResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PlaceOrderResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.PlaceOrderResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderResponseOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderResponseOrBuilder.java new file mode 100644 index 00000000..55ee1b9b --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PlaceOrderResponseOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface PlaceOrderResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.PlaceOrderResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * string order_id = 1; + * @return The orderId. + */ + java.lang.String getOrderId(); + /** + * string order_id = 1; + * @return The bytes for orderId. 
+ */ + com.google.protobuf.ByteString + getOrderIdBytes(); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PrepareRequest.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PrepareRequest.java new file mode 100644 index 00000000..c4cab4cd --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PrepareRequest.java @@ -0,0 +1,509 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.PrepareRequest} + */ +public final class PrepareRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.PrepareRequest) + PrepareRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use PrepareRequest.newBuilder() to construct. + private PrepareRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private PrepareRequest() { + transactionId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new PrepareRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_PrepareRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_PrepareRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.PrepareRequest.class, sample.rpc.PrepareRequest.Builder.class); + } + + public static final int TRANSACTION_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + @java.lang.Override + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + @java.lang.Override + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, transactionId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, transactionId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.PrepareRequest)) { + return super.equals(obj); + } + sample.rpc.PrepareRequest other = (sample.rpc.PrepareRequest) obj; + + if (!getTransactionId() + .equals(other.getTransactionId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getTransactionId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.PrepareRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PrepareRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PrepareRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PrepareRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.PrepareRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.PrepareRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public 
static sample.rpc.PrepareRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.PrepareRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.PrepareRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.PrepareRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.PrepareRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.PrepareRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.PrepareRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.PrepareRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.PrepareRequest) + sample.rpc.PrepareRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_PrepareRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_PrepareRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.PrepareRequest.class, sample.rpc.PrepareRequest.Builder.class); + } + + // Construct using sample.rpc.PrepareRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + transactionId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_PrepareRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.PrepareRequest getDefaultInstanceForType() { + return sample.rpc.PrepareRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.PrepareRequest build() { + sample.rpc.PrepareRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.PrepareRequest buildPartial() { + sample.rpc.PrepareRequest result = new sample.rpc.PrepareRequest(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.PrepareRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.transactionId_ = transactionId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.PrepareRequest) { + return mergeFrom((sample.rpc.PrepareRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.PrepareRequest other) { + if (other == sample.rpc.PrepareRequest.getDefaultInstance()) return this; + if (!other.getTransactionId().isEmpty()) { + transactionId_ = other.transactionId_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + transactionId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: { 
+ if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. + */ + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string transaction_id = 1; + * @param value The transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @return This builder for chaining. + */ + public Builder clearTransactionId() { + transactionId_ = getDefaultInstance().getTransactionId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @param value The bytes for transactionId to set. + * @return This builder for chaining. 
+ */ + public Builder setTransactionIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.PrepareRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.PrepareRequest) + private static final sample.rpc.PrepareRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.PrepareRequest(); + } + + public static sample.rpc.PrepareRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public PrepareRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.PrepareRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PrepareRequestOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PrepareRequestOrBuilder.java new file mode 100644 index 00000000..e5648535 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/PrepareRequestOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface PrepareRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.PrepareRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * string transaction_id = 1; + * @return The transactionId. + */ + java.lang.String getTransactionId(); + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + com.google.protobuf.ByteString + getTransactionIdBytes(); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/RepaymentRequest.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/RepaymentRequest.java new file mode 100644 index 00000000..0a037f4d --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/RepaymentRequest.java @@ -0,0 +1,505 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.RepaymentRequest} + */ +public final class RepaymentRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.RepaymentRequest) + RepaymentRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use RepaymentRequest.newBuilder() to construct. + private RepaymentRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private RepaymentRequest() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new RepaymentRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_RepaymentRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_RepaymentRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.RepaymentRequest.class, sample.rpc.RepaymentRequest.Builder.class); + } + + public static final int CUSTOMER_ID_FIELD_NUMBER = 1; + private int customerId_ = 0; + /** + * int32 customer_id = 1; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + + public static final int AMOUNT_FIELD_NUMBER = 2; + private int amount_ = 0; + /** + * int32 amount = 2; + * @return The amount. 
+ */ + @java.lang.Override + public int getAmount() { + return amount_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (customerId_ != 0) { + output.writeInt32(1, customerId_); + } + if (amount_ != 0) { + output.writeInt32(2, amount_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (customerId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, customerId_); + } + if (amount_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, amount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.RepaymentRequest)) { + return super.equals(obj); + } + sample.rpc.RepaymentRequest other = (sample.rpc.RepaymentRequest) obj; + + if (getCustomerId() + != other.getCustomerId()) return false; + if (getAmount() + != other.getAmount()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CUSTOMER_ID_FIELD_NUMBER; + hash = (53 * hash) + getCustomerId(); + hash = (37 * hash) + AMOUNT_FIELD_NUMBER; + hash = (53 * hash) + getAmount(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.RepaymentRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.RepaymentRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.RepaymentRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.RepaymentRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.RepaymentRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.RepaymentRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.RepaymentRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + 
.parseWithIOException(PARSER, input); + } + public static sample.rpc.RepaymentRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.RepaymentRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.RepaymentRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.RepaymentRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.RepaymentRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.RepaymentRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.RepaymentRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.RepaymentRequest) + sample.rpc.RepaymentRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_RepaymentRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_RepaymentRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.RepaymentRequest.class, sample.rpc.RepaymentRequest.Builder.class); + } + + // Construct using sample.rpc.RepaymentRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + customerId_ = 0; + amount_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_RepaymentRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.RepaymentRequest getDefaultInstanceForType() { + return sample.rpc.RepaymentRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.RepaymentRequest build() { + sample.rpc.RepaymentRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.RepaymentRequest buildPartial() { + sample.rpc.RepaymentRequest result = new sample.rpc.RepaymentRequest(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.RepaymentRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.customerId_ = customerId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.amount_ = amount_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.RepaymentRequest) { + return mergeFrom((sample.rpc.RepaymentRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.RepaymentRequest other) { + if (other == sample.rpc.RepaymentRequest.getDefaultInstance()) return this; + if (other.getCustomerId() != 0) { + setCustomerId(other.getCustomerId()); + } + if (other.getAmount() != 0) { + setAmount(other.getAmount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = 
true; + break; + case 8: { + customerId_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 16: { + amount_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private int customerId_ ; + /** + * int32 customer_id = 1; + * @return The customerId. + */ + @java.lang.Override + public int getCustomerId() { + return customerId_; + } + /** + * int32 customer_id = 1; + * @param value The customerId to set. + * @return This builder for chaining. + */ + public Builder setCustomerId(int value) { + + customerId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * int32 customer_id = 1; + * @return This builder for chaining. + */ + public Builder clearCustomerId() { + bitField0_ = (bitField0_ & ~0x00000001); + customerId_ = 0; + onChanged(); + return this; + } + + private int amount_ ; + /** + * int32 amount = 2; + * @return The amount. + */ + @java.lang.Override + public int getAmount() { + return amount_; + } + /** + * int32 amount = 2; + * @param value The amount to set. + * @return This builder for chaining. + */ + public Builder setAmount(int value) { + + amount_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * int32 amount = 2; + * @return This builder for chaining. + */ + public Builder clearAmount() { + bitField0_ = (bitField0_ & ~0x00000002); + amount_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.RepaymentRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.RepaymentRequest) + private static final sample.rpc.RepaymentRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.RepaymentRequest(); + } + + public static sample.rpc.RepaymentRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RepaymentRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.RepaymentRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/RepaymentRequestOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/RepaymentRequestOrBuilder.java new file mode 100644 index 00000000..c373e5c3 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/RepaymentRequestOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface RepaymentRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.RepaymentRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * int32 customer_id = 1; + * @return The customerId. + */ + int getCustomerId(); + + /** + * int32 amount = 2; + * @return The amount. + */ + int getAmount(); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/RollbackRequest.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/RollbackRequest.java new file mode 100644 index 00000000..710b30ff --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/RollbackRequest.java @@ -0,0 +1,509 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.RollbackRequest} + */ +public final class RollbackRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.RollbackRequest) + RollbackRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use RollbackRequest.newBuilder() to construct. + private RollbackRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private RollbackRequest() { + transactionId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new RollbackRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_RollbackRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_RollbackRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.RollbackRequest.class, sample.rpc.RollbackRequest.Builder.class); + } + + public static final int TRANSACTION_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + @java.lang.Override + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + @java.lang.Override + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, transactionId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, transactionId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.RollbackRequest)) { + return super.equals(obj); + } + sample.rpc.RollbackRequest other = (sample.rpc.RollbackRequest) obj; + + if (!getTransactionId() + .equals(other.getTransactionId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getTransactionId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.RollbackRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.RollbackRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.RollbackRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.RollbackRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.RollbackRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.RollbackRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + 
public static sample.rpc.RollbackRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.RollbackRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.RollbackRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.RollbackRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.RollbackRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.RollbackRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.RollbackRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.RollbackRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.RollbackRequest) + sample.rpc.RollbackRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_RollbackRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_RollbackRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.RollbackRequest.class, sample.rpc.RollbackRequest.Builder.class); + } + + // Construct using sample.rpc.RollbackRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + transactionId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_RollbackRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.RollbackRequest getDefaultInstanceForType() { + return sample.rpc.RollbackRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.RollbackRequest build() { + sample.rpc.RollbackRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.RollbackRequest buildPartial() { + sample.rpc.RollbackRequest result = new sample.rpc.RollbackRequest(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.RollbackRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.transactionId_ = transactionId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.RollbackRequest) { + return mergeFrom((sample.rpc.RollbackRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.RollbackRequest other) { + if (other == sample.rpc.RollbackRequest.getDefaultInstance()) return this; + if (!other.getTransactionId().isEmpty()) { + transactionId_ = other.transactionId_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + transactionId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // 
case 10 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. + */ + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string transaction_id = 1; + * @param value The transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @return This builder for chaining. + */ + public Builder clearTransactionId() { + transactionId_ = getDefaultInstance().getTransactionId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @param value The bytes for transactionId to set. + * @return This builder for chaining. 
+ */ + public Builder setTransactionIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.RollbackRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.RollbackRequest) + private static final sample.rpc.RollbackRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.RollbackRequest(); + } + + public static sample.rpc.RollbackRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RollbackRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.RollbackRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/RollbackRequestOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/RollbackRequestOrBuilder.java new file mode 100644 index 00000000..b06ab5da --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/RollbackRequestOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface RollbackRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.RollbackRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * string transaction_id = 1; + * @return The transactionId. + */ + java.lang.String getTransactionId(); + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + com.google.protobuf.ByteString + getTransactionIdBytes(); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/Sample.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/Sample.java new file mode 100644 index 00000000..1f528b54 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/Sample.java @@ -0,0 +1,269 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public final class Sample { + private Sample() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_ItemOrder_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_ItemOrder_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_PlaceOrderRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_PlaceOrderRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_PlaceOrderResponse_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_PlaceOrderResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_Order_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_Order_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_Statement_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_Statement_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_GetOrderRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_GetOrderRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_GetOrderResponse_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_GetOrderResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_GetOrdersRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_GetOrdersRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_GetOrdersResponse_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_GetOrdersResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_GetCustomerInfoRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_GetCustomerInfoRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_GetCustomerInfoResponse_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_rpc_GetCustomerInfoResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_PaymentRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_PaymentRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_RepaymentRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_RepaymentRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_PrepareRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_PrepareRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_ValidateRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_ValidateRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_CommitRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_CommitRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_rpc_RollbackRequest_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_rpc_RollbackRequest_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\014sample.proto\022\003rpc\032\033google/protobuf/emp" + + "ty.proto\"+\n\tItemOrder\022\017\n\007item_id\030\001 \001(\005\022\r" + + "\n\005count\030\002 \001(\005\"L\n\021PlaceOrderRequest\022\023\n\013cu" + + "stomer_id\030\001 \001(\005\022\"\n\nitem_order\030\002 \003(\0132\016.rp" + + "c.ItemOrder\"&\n\022PlaceOrderResponse\022\020\n\010ord" + + "er_id\030\001 \001(\t\"\212\001\n\005Order\022\020\n\010order_id\030\001 \001(\t\022" + + "\021\n\ttimestamp\030\002 \001(\003\022\023\n\013customer_id\030\003 \001(\005\022" + + "\025\n\rcustomer_name\030\004 \001(\t\022!\n\tstatement\030\005 \003(" + + "\0132\016.rpc.Statement\022\r\n\005total\030\006 \001(\005\"\\\n\tStat" + + "ement\022\017\n\007item_id\030\001 \001(\005\022\021\n\titem_name\030\002 \001(" + + "\t\022\r\n\005price\030\003 \001(\005\022\r\n\005count\030\004 \001(\005\022\r\n\005total" + + "\030\005 \001(\005\"#\n\017GetOrderRequest\022\020\n\010order_id\030\001 " + + "\001(\t\"-\n\020GetOrderResponse\022\031\n\005order\030\001 \001(\0132\n" + + ".rpc.Order\"\'\n\020GetOrdersRequest\022\023\n\013custom" + + "er_id\030\001 \001(\005\".\n\021GetOrdersResponse\022\031\n\005orde" + + "r\030\001 \003(\0132\n.rpc.Order\"]\n\026GetCustomerInfoRe" + + "quest\022\033\n\016transaction_id\030\001 \001(\tH\000\210\001\001\022\023\n\013cu" + + "stomer_id\030\002 \001(\005B\021\n\017_transaction_id\"_\n\027Ge" + + "tCustomerInfoResponse\022\n\n\002id\030\001 \001(\005\022\014\n\004nam" + + "e\030\002 \001(\t\022\024\n\014credit_limit\030\003 \001(\005\022\024\n\014credit_" + + "total\030\004 \001(\005\"M\n\016PaymentRequest\022\026\n\016transac" + + "tion_id\030\001 \001(\t\022\023\n\013customer_id\030\002 \001(\005\022\016\n\006am" + + "ount\030\003 \001(\005\"7\n\020RepaymentRequest\022\023\n\013custom" + + 
"er_id\030\001 \001(\005\022\016\n\006amount\030\002 \001(\005\"(\n\016PrepareRe" + + "quest\022\026\n\016transaction_id\030\001 \001(\t\")\n\017Validat" + + "eRequest\022\026\n\016transaction_id\030\001 \001(\t\"\'\n\rComm" + + "itRequest\022\026\n\016transaction_id\030\001 \001(\t\")\n\017Rol" + + "lbackRequest\022\026\n\016transaction_id\030\001 \001(\t2\310\001\n" + + "\014OrderService\022?\n\nPlaceOrder\022\026.rpc.PlaceO" + + "rderRequest\032\027.rpc.PlaceOrderResponse\"\000\0229" + + "\n\010GetOrder\022\024.rpc.GetOrderRequest\032\025.rpc.G" + + "etOrderResponse\"\000\022<\n\tGetOrders\022\025.rpc.Get" + + "OrdersRequest\032\026.rpc.GetOrdersResponse\"\0002" + + "\303\003\n\017CustomerService\022N\n\017GetCustomerInfo\022\033" + + ".rpc.GetCustomerInfoRequest\032\034.rpc.GetCus" + + "tomerInfoResponse\"\000\022<\n\tRepayment\022\025.rpc.R" + + "epaymentRequest\032\026.google.protobuf.Empty\"" + + "\000\0228\n\007Payment\022\023.rpc.PaymentRequest\032\026.goog" + + "le.protobuf.Empty\"\000\0228\n\007Prepare\022\023.rpc.Pre" + + "pareRequest\032\026.google.protobuf.Empty\"\000\022:\n" + + "\010Validate\022\024.rpc.ValidateRequest\032\026.google" + + ".protobuf.Empty\"\000\0226\n\006Commit\022\022.rpc.Commit" + + "Request\032\026.google.protobuf.Empty\"\000\022:\n\010Rol" + + "lback\022\024.rpc.RollbackRequest\032\026.google.pro" + + "tobuf.Empty\"\000B\026\n\nsample.rpcB\006SampleP\001b\006p" + + "roto3" + }; + descriptor = com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.protobuf.EmptyProto.getDescriptor(), + }); + internal_static_rpc_ItemOrder_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_rpc_ItemOrder_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_ItemOrder_descriptor, + new java.lang.String[] { "ItemId", "Count", }); + internal_static_rpc_PlaceOrderRequest_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_rpc_PlaceOrderRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_PlaceOrderRequest_descriptor, + new java.lang.String[] { "CustomerId", "ItemOrder", }); + internal_static_rpc_PlaceOrderResponse_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_rpc_PlaceOrderResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_PlaceOrderResponse_descriptor, + new java.lang.String[] { "OrderId", }); + internal_static_rpc_Order_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_rpc_Order_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_Order_descriptor, + new java.lang.String[] { "OrderId", "Timestamp", "CustomerId", "CustomerName", "Statement", "Total", }); + internal_static_rpc_Statement_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_rpc_Statement_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_Statement_descriptor, + new java.lang.String[] { "ItemId", "ItemName", "Price", "Count", "Total", }); + internal_static_rpc_GetOrderRequest_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_rpc_GetOrderRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + 
internal_static_rpc_GetOrderRequest_descriptor, + new java.lang.String[] { "OrderId", }); + internal_static_rpc_GetOrderResponse_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_rpc_GetOrderResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_GetOrderResponse_descriptor, + new java.lang.String[] { "Order", }); + internal_static_rpc_GetOrdersRequest_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_rpc_GetOrdersRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_GetOrdersRequest_descriptor, + new java.lang.String[] { "CustomerId", }); + internal_static_rpc_GetOrdersResponse_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_rpc_GetOrdersResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_GetOrdersResponse_descriptor, + new java.lang.String[] { "Order", }); + internal_static_rpc_GetCustomerInfoRequest_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_rpc_GetCustomerInfoRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_GetCustomerInfoRequest_descriptor, + new java.lang.String[] { "TransactionId", "CustomerId", "TransactionId", }); + internal_static_rpc_GetCustomerInfoResponse_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_rpc_GetCustomerInfoResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_GetCustomerInfoResponse_descriptor, + new java.lang.String[] { "Id", "Name", "CreditLimit", "CreditTotal", }); + internal_static_rpc_PaymentRequest_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_rpc_PaymentRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_PaymentRequest_descriptor, + new java.lang.String[] { "TransactionId", "CustomerId", "Amount", }); + internal_static_rpc_RepaymentRequest_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_rpc_RepaymentRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_RepaymentRequest_descriptor, + new java.lang.String[] { "CustomerId", "Amount", }); + internal_static_rpc_PrepareRequest_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_rpc_PrepareRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_PrepareRequest_descriptor, + new java.lang.String[] { "TransactionId", }); + internal_static_rpc_ValidateRequest_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_rpc_ValidateRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_ValidateRequest_descriptor, + new java.lang.String[] { "TransactionId", }); + internal_static_rpc_CommitRequest_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_rpc_CommitRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_CommitRequest_descriptor, + new java.lang.String[] { "TransactionId", }); + internal_static_rpc_RollbackRequest_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_rpc_RollbackRequest_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_rpc_RollbackRequest_descriptor, + new java.lang.String[] { "TransactionId", }); + com.google.protobuf.EmptyProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/Statement.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/Statement.java new file mode 100644 index 00000000..164cde9b --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/Statement.java @@ -0,0 +1,773 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.Statement} + */ +public final class Statement extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.Statement) + StatementOrBuilder { +private static final long serialVersionUID = 0L; + // Use Statement.newBuilder() to construct. + private Statement(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private Statement() { + itemName_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new Statement(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_Statement_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_Statement_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.Statement.class, sample.rpc.Statement.Builder.class); + } + + public static final int ITEM_ID_FIELD_NUMBER = 1; + private int itemId_ = 0; + /** + * int32 item_id = 1; + * @return The itemId. + */ + @java.lang.Override + public int getItemId() { + return itemId_; + } + + public static final int ITEM_NAME_FIELD_NUMBER = 2; + @SuppressWarnings("serial") + private volatile java.lang.Object itemName_ = ""; + /** + * string item_name = 2; + * @return The itemName. + */ + @java.lang.Override + public java.lang.String getItemName() { + java.lang.Object ref = itemName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + itemName_ = s; + return s; + } + } + /** + * string item_name = 2; + * @return The bytes for itemName. + */ + @java.lang.Override + public com.google.protobuf.ByteString + getItemNameBytes() { + java.lang.Object ref = itemName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + itemName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PRICE_FIELD_NUMBER = 3; + private int price_ = 0; + /** + * int32 price = 3; + * @return The price. + */ + @java.lang.Override + public int getPrice() { + return price_; + } + + public static final int COUNT_FIELD_NUMBER = 4; + private int count_ = 0; + /** + * int32 count = 4; + * @return The count. 
+ */ + @java.lang.Override + public int getCount() { + return count_; + } + + public static final int TOTAL_FIELD_NUMBER = 5; + private int total_ = 0; + /** + * int32 total = 5; + * @return The total. + */ + @java.lang.Override + public int getTotal() { + return total_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (itemId_ != 0) { + output.writeInt32(1, itemId_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(itemName_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, itemName_); + } + if (price_ != 0) { + output.writeInt32(3, price_); + } + if (count_ != 0) { + output.writeInt32(4, count_); + } + if (total_ != 0) { + output.writeInt32(5, total_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (itemId_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, itemId_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(itemName_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, itemName_); + } + if (price_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(3, price_); + } + if (count_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(4, count_); + } + if (total_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(5, total_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.Statement)) { + return super.equals(obj); + } + sample.rpc.Statement other = (sample.rpc.Statement) obj; + + if (getItemId() + != other.getItemId()) return false; + if (!getItemName() + .equals(other.getItemName())) return false; + if (getPrice() + != other.getPrice()) return false; + if (getCount() + != other.getCount()) return false; + if (getTotal() + != other.getTotal()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ITEM_ID_FIELD_NUMBER; + hash = (53 * hash) + getItemId(); + hash = (37 * hash) + ITEM_NAME_FIELD_NUMBER; + hash = (53 * hash) + getItemName().hashCode(); + hash = (37 * hash) + PRICE_FIELD_NUMBER; + hash = (53 * hash) + getPrice(); + hash = (37 * hash) + COUNT_FIELD_NUMBER; + hash = (53 * hash) + getCount(); + hash = (37 * hash) + TOTAL_FIELD_NUMBER; + hash = (53 * hash) + getTotal(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.Statement parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.Statement parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.Statement parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.Statement parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.Statement parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.Statement parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.Statement parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.Statement parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.Statement parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.Statement parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.Statement parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.Statement parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.Statement prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.Statement} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.Statement) + sample.rpc.StatementOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_Statement_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_Statement_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.Statement.class, sample.rpc.Statement.Builder.class); + } + + // Construct using sample.rpc.Statement.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + itemId_ = 0; + itemName_ = ""; + price_ = 0; + count_ = 0; + total_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_Statement_descriptor; + } + + @java.lang.Override + public sample.rpc.Statement getDefaultInstanceForType() { + return sample.rpc.Statement.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.Statement build() { + sample.rpc.Statement result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.Statement buildPartial() { + sample.rpc.Statement result = new sample.rpc.Statement(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.Statement result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.itemId_ = itemId_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.itemName_ = itemName_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.price_ = price_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.count_ = count_; + } + if (((from_bitField0_ & 0x00000010) != 0)) { + result.total_ = total_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.Statement) { + return mergeFrom((sample.rpc.Statement)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.Statement other) { + if (other == sample.rpc.Statement.getDefaultInstance()) return this; + if (other.getItemId() != 0) { + setItemId(other.getItemId()); + } + if (!other.getItemName().isEmpty()) { + itemName_ = other.itemName_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.getPrice() != 0) { + setPrice(other.getPrice()); + } + if (other.getCount() != 0) { + setCount(other.getCount()); + } + if (other.getTotal() != 0) { + setTotal(other.getTotal()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder 
mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + itemId_ = input.readInt32(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: { + itemName_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 24: { + price_ = input.readInt32(); + bitField0_ |= 0x00000004; + break; + } // case 24 + case 32: { + count_ = input.readInt32(); + bitField0_ |= 0x00000008; + break; + } // case 32 + case 40: { + total_ = input.readInt32(); + bitField0_ |= 0x00000010; + break; + } // case 40 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private int itemId_ ; + /** + * int32 item_id = 1; + * @return The itemId. + */ + @java.lang.Override + public int getItemId() { + return itemId_; + } + /** + * int32 item_id = 1; + * @param value The itemId to set. + * @return This builder for chaining. + */ + public Builder setItemId(int value) { + + itemId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * int32 item_id = 1; + * @return This builder for chaining. + */ + public Builder clearItemId() { + bitField0_ = (bitField0_ & ~0x00000001); + itemId_ = 0; + onChanged(); + return this; + } + + private java.lang.Object itemName_ = ""; + /** + * string item_name = 2; + * @return The itemName. + */ + public java.lang.String getItemName() { + java.lang.Object ref = itemName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + itemName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string item_name = 2; + * @return The bytes for itemName. + */ + public com.google.protobuf.ByteString + getItemNameBytes() { + java.lang.Object ref = itemName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + itemName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string item_name = 2; + * @param value The itemName to set. + * @return This builder for chaining. + */ + public Builder setItemName( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + itemName_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * string item_name = 2; + * @return This builder for chaining. + */ + public Builder clearItemName() { + itemName_ = getDefaultInstance().getItemName(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * string item_name = 2; + * @param value The bytes for itemName to set. + * @return This builder for chaining. 
+ */ + public Builder setItemNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + itemName_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private int price_ ; + /** + * int32 price = 3; + * @return The price. + */ + @java.lang.Override + public int getPrice() { + return price_; + } + /** + * int32 price = 3; + * @param value The price to set. + * @return This builder for chaining. + */ + public Builder setPrice(int value) { + + price_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * int32 price = 3; + * @return This builder for chaining. + */ + public Builder clearPrice() { + bitField0_ = (bitField0_ & ~0x00000004); + price_ = 0; + onChanged(); + return this; + } + + private int count_ ; + /** + * int32 count = 4; + * @return The count. + */ + @java.lang.Override + public int getCount() { + return count_; + } + /** + * int32 count = 4; + * @param value The count to set. + * @return This builder for chaining. + */ + public Builder setCount(int value) { + + count_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * int32 count = 4; + * @return This builder for chaining. + */ + public Builder clearCount() { + bitField0_ = (bitField0_ & ~0x00000008); + count_ = 0; + onChanged(); + return this; + } + + private int total_ ; + /** + * int32 total = 5; + * @return The total. + */ + @java.lang.Override + public int getTotal() { + return total_; + } + /** + * int32 total = 5; + * @param value The total to set. + * @return This builder for chaining. + */ + public Builder setTotal(int value) { + + total_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + /** + * int32 total = 5; + * @return This builder for chaining. 
+ */ + public Builder clearTotal() { + bitField0_ = (bitField0_ & ~0x00000010); + total_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.Statement) + } + + // @@protoc_insertion_point(class_scope:rpc.Statement) + private static final sample.rpc.Statement DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.Statement(); + } + + public static sample.rpc.Statement getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Statement parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.Statement getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/StatementOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/StatementOrBuilder.java new file mode 100644 index 00000000..2ac1a841 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/StatementOrBuilder.java @@ -0,0 +1,45 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface StatementOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.Statement) + com.google.protobuf.MessageOrBuilder { + + /** + * int32 item_id = 1; + * @return The itemId. + */ + int getItemId(); + + /** + * string item_name = 2; + * @return The itemName. + */ + java.lang.String getItemName(); + /** + * string item_name = 2; + * @return The bytes for itemName. + */ + com.google.protobuf.ByteString + getItemNameBytes(); + + /** + * int32 price = 3; + * @return The price. + */ + int getPrice(); + + /** + * int32 count = 4; + * @return The count. + */ + int getCount(); + + /** + * int32 total = 5; + * @return The total. 
+ */ + int getTotal(); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/ValidateRequest.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/ValidateRequest.java new file mode 100644 index 00000000..448a8d98 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/ValidateRequest.java @@ -0,0 +1,509 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +/** + * Protobuf type {@code rpc.ValidateRequest} + */ +public final class ValidateRequest extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:rpc.ValidateRequest) + ValidateRequestOrBuilder { +private static final long serialVersionUID = 0L; + // Use ValidateRequest.newBuilder() to construct. + private ValidateRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ValidateRequest() { + transactionId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new ValidateRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_ValidateRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_ValidateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.ValidateRequest.class, sample.rpc.ValidateRequest.Builder.class); + } + + public static final int TRANSACTION_ID_FIELD_NUMBER = 1; + @SuppressWarnings("serial") + private volatile java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + @java.lang.Override + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + @java.lang.Override + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, transactionId_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(transactionId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, transactionId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof sample.rpc.ValidateRequest)) { + return super.equals(obj); + } + sample.rpc.ValidateRequest other = (sample.rpc.ValidateRequest) obj; + + if (!getTransactionId() + .equals(other.getTransactionId())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getTransactionId().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static sample.rpc.ValidateRequest parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.ValidateRequest parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.ValidateRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.ValidateRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static sample.rpc.ValidateRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static sample.rpc.ValidateRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + 
public static sample.rpc.ValidateRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.ValidateRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static sample.rpc.ValidateRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static sample.rpc.ValidateRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static sample.rpc.ValidateRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static sample.rpc.ValidateRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(sample.rpc.ValidateRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code rpc.ValidateRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:rpc.ValidateRequest) + sample.rpc.ValidateRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return sample.rpc.Sample.internal_static_rpc_ValidateRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return sample.rpc.Sample.internal_static_rpc_ValidateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + sample.rpc.ValidateRequest.class, sample.rpc.ValidateRequest.Builder.class); + } + + // Construct using sample.rpc.ValidateRequest.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + transactionId_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return sample.rpc.Sample.internal_static_rpc_ValidateRequest_descriptor; + } + + @java.lang.Override + public sample.rpc.ValidateRequest getDefaultInstanceForType() { + return sample.rpc.ValidateRequest.getDefaultInstance(); + } + + @java.lang.Override + public sample.rpc.ValidateRequest build() { + sample.rpc.ValidateRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public sample.rpc.ValidateRequest buildPartial() { + sample.rpc.ValidateRequest result = new sample.rpc.ValidateRequest(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(sample.rpc.ValidateRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.transactionId_ = transactionId_; + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof sample.rpc.ValidateRequest) { + return mergeFrom((sample.rpc.ValidateRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(sample.rpc.ValidateRequest other) { + if (other == sample.rpc.ValidateRequest.getDefaultInstance()) return this; + if (!other.getTransactionId().isEmpty()) { + transactionId_ = other.transactionId_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + transactionId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // 
case 10 + default: { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private java.lang.Object transactionId_ = ""; + /** + * string transaction_id = 1; + * @return The transactionId. + */ + public java.lang.String getTransactionId() { + java.lang.Object ref = transactionId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + transactionId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string transaction_id = 1; + * @return The bytes for transactionId. + */ + public com.google.protobuf.ByteString + getTransactionIdBytes() { + java.lang.Object ref = transactionId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + transactionId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string transaction_id = 1; + * @param value The transactionId to set. + * @return This builder for chaining. + */ + public Builder setTransactionId( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @return This builder for chaining. + */ + public Builder clearTransactionId() { + transactionId_ = getDefaultInstance().getTransactionId(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * string transaction_id = 1; + * @param value The bytes for transactionId to set. + * @return This builder for chaining. 
+ */ + public Builder setTransactionIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + transactionId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:rpc.ValidateRequest) + } + + // @@protoc_insertion_point(class_scope:rpc.ValidateRequest) + private static final sample.rpc.ValidateRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new sample.rpc.ValidateRequest(); + } + + public static sample.rpc.ValidateRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ValidateRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public sample.rpc.ValidateRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/ValidateRequestOrBuilder.java b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/ValidateRequestOrBuilder.java new file mode 100644 index 00000000..3934c2d2 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/java/sample/rpc/ValidateRequestOrBuilder.java @@ -0,0 +1,21 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: sample.proto + +package sample.rpc; + +public interface ValidateRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:rpc.ValidateRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * string transaction_id = 1; + * @return The transactionId. + */ + java.lang.String getTransactionId(); + /** + * string transaction_id = 1; + * @return The bytes for transactionId. 
+ */ + com.google.protobuf.ByteString + getTransactionIdBytes(); +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/proto/sample.proto b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/proto/sample.proto new file mode 100644 index 00000000..98065367 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/rpc/src/main/proto/sample.proto @@ -0,0 +1,136 @@ +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "sample.rpc"; +option java_outer_classname = "Sample"; + +package rpc; + +import "google/protobuf/empty.proto"; + +// for Order Service +service OrderService { + // Place an order. It's a transaction that spans OrderService and CustomerService + rpc PlaceOrder(PlaceOrderRequest) returns (PlaceOrderResponse) { + } + // Get Order information by order ID + rpc GetOrder(GetOrderRequest) returns (GetOrderResponse) { + } + // Get Order information by customer ID + rpc GetOrders(GetOrdersRequest) returns (GetOrdersResponse) { + } +} + +message ItemOrder { + int32 item_id = 1; + int32 count = 2; +} + +message PlaceOrderRequest { + int32 customer_id = 1; + repeated ItemOrder item_order = 2; +} + +message PlaceOrderResponse { + string order_id = 1; +} + +message Order { + string order_id = 1; + int64 timestamp = 2; + int32 customer_id = 3; + string customer_name = 4; + repeated Statement statement = 5; + int32 total = 6; +} + +message Statement { + int32 item_id = 1; + string item_name = 2; + int32 price = 3; + int32 count = 4; + int32 total = 5; +} + +message GetOrderRequest { + string order_id = 1; +} + +message GetOrderResponse { + Order order = 1; +} + +message GetOrdersRequest { + int32 customer_id = 1; +} + +message GetOrdersResponse { + repeated Order order = 1; +} + +// for Customer Service +service CustomerService { + // Get customer information + rpc GetCustomerInfo(GetCustomerInfoRequest) returns (GetCustomerInfoResponse) { + } + // Credit card repayment + rpc Repayment(RepaymentRequest) returns (google.protobuf.Empty) { + } + + // RPCs for two-phase commit transactions + + // Credit card payment + rpc Payment(PaymentRequest) returns (google.protobuf.Empty) { + } + // Prepare the transaction + rpc Prepare(PrepareRequest) returns (google.protobuf.Empty) { + } + // Validate the transaction + rpc Validate(ValidateRequest) returns (google.protobuf.Empty) { + } + // Commit the transaction + rpc Commit(CommitRequest) returns (google.protobuf.Empty) { + } + // Rollback the transaction + rpc Rollback(RollbackRequest) returns (google.protobuf.Empty) { + } +} + +message GetCustomerInfoRequest { + optional string transaction_id = 1; + int32 customer_id = 2; +} + +message GetCustomerInfoResponse { + int32 id = 1; + string name = 2; + int32 credit_limit = 3; + int32 credit_total = 4; +} + +message PaymentRequest { + string transaction_id = 1; + int32 customer_id = 2; + int32 amount = 3; +} + +message RepaymentRequest { + int32 customer_id = 1; + int32 amount = 2; +} + +message PrepareRequest { + string transaction_id = 1; +} + +message ValidateRequest { + string transaction_id = 1; +} + +message CommitRequest { + string transaction_id = 1; +} + +message RollbackRequest { + string transaction_id = 1; +} diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/scalardb-sql.properties b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/scalardb-sql.properties new file mode 100644 index 00000000..980f8667 --- 
/dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/scalardb-sql.properties @@ -0,0 +1,15 @@ +scalar.db.sql.connection_mode=direct +scalar.db.storage=multi-storage +scalar.db.multi_storage.storages=cassandra,mysql +scalar.db.multi_storage.storages.cassandra.storage=cassandra +scalar.db.multi_storage.storages.cassandra.contact_points=localhost +scalar.db.multi_storage.storages.cassandra.username=cassandra +scalar.db.multi_storage.storages.cassandra.password=cassandra +scalar.db.multi_storage.storages.mysql.storage=jdbc +scalar.db.multi_storage.storages.mysql.contact_points=jdbc:mysql://localhost:3306/ +scalar.db.multi_storage.storages.mysql.username=root +scalar.db.multi_storage.storages.mysql.password=mysql +scalar.db.multi_storage.namespace_mapping=customer_service:mysql,order_service:cassandra,coordinator:cassandra +scalar.db.multi_storage.default_storage=mysql +scalar.db.consensus_commit.isolation_level=SERIALIZABLE + diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/schema.sql b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/schema.sql new file mode 100644 index 00000000..cbf878d1 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/schema.sql @@ -0,0 +1,34 @@ +CREATE COORDINATOR TABLES IF NOT EXIST; + +CREATE NAMESPACE IF NOT EXISTS customer_service; + +CREATE TABLE IF NOT EXISTS customer_service.customers ( + customer_id INT PRIMARY KEY, + name TEXT, + credit_limit INT, + credit_total INT +); + +CREATE NAMESPACE IF NOT EXISTS order_service; + +CREATE TABLE IF NOT EXISTS order_service.orders ( + customer_id INT, + timestamp BIGINT, + order_id TEXT, + PRIMARY KEY (customer_id, timestamp) +); + +CREATE INDEX IF NOT EXISTS ON order_service.orders (order_id); + +CREATE TABLE IF NOT EXISTS order_service.statements ( + order_id TEXT, + item_id INT, + count INT, + PRIMARY KEY (order_id, item_id) +); + +CREATE TABLE IF NOT EXISTS order_service.items ( + item_id INT PRIMARY KEY, + name TEXT, + price INT +); diff --git a/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/settings.gradle b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/settings.gradle new file mode 100644 index 00000000..baf92bf8 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-microservice-transaction-sample/settings.gradle @@ -0,0 +1,6 @@ +rootProject.name = 'spring-data-microservice-transaction-sample' +include 'customer-service' +include 'order-service' +include 'rpc' +include 'client' + diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/README.md b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/README.md new file mode 100644 index 00000000..18912cfa --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/README.md @@ -0,0 +1,310 @@ +# Sample application of Spring Data JDBC for ScalarDB with Multi-storage Transactions + +This tutorial describes how to create a sample Spring Boot application by using Spring Data JDBC for ScalarDB with Multi-storage Transactions. + +## Prerequisites + +- Java (OpenJDK 8 or higher) +- Gradle +- Docker, Docker Compose + +In addition, you need access to the [ScalarDB SQL GitHub repository](https://github.com/scalar-labs/scalardb-sql) and [Packages in ScalarDB SQL repository](https://github.com/orgs/scalar-labs/packages?repo_name=scalardb-sql). 
+These repositories are available only to users with a commercial license and permission. +To get a license and permission, please [contact us](https://www.scalar-labs.com/contact/). + +You also need the `gpr.user` property for your GitHub username and the `gpr.key` property for your personal access token. +You must either add these properties in `~/.gradle/gradle.properties` or specify the properties by using the `-P` option when running the `./gradlew` command as follows: + +```shell +$ ./gradlew run ... -Pgpr.user=<YOUR_GITHUB_USERNAME> -Pgpr.key=<YOUR_PERSONAL_ACCESS_TOKEN> +``` + +Alternatively, you can use the environment variables `USERNAME` for your GitHub username and `TOKEN` for your personal access token. + +```shell +$ export USERNAME=<YOUR_GITHUB_USERNAME> +$ export TOKEN=<YOUR_PERSONAL_ACCESS_TOKEN> +``` + +For more details, see [Install - ScalarDB SQL](https://github.com/scalar-labs/scalardb-sql#install). + +## Sample application + +### Overview + +This tutorial describes how to create a sample Spring Boot application for the same use case as [ScalarDB Sample](https://github.com/scalar-labs/scalardb-samples/tree/main/scalardb-sample) but by using Spring Data JDBC for ScalarDB with Multi-storage Transactions. +Please note that application-specific error handling, authentication processing, etc. are omitted in the sample application since this tutorial focuses on explaining how to use Spring Data JDBC for ScalarDB with Multi-storage Transactions. +For details, please see [Guide of Spring Data JDBC for ScalarDB](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/spring-data-guide.md). + +![Overview](images/overview.png) + +### Schema + +[The schema](schema.sql) is as follows: + +```sql +CREATE COORDINATOR TABLES IF NOT EXIST; + +CREATE NAMESPACE IF NOT EXISTS customer; + +CREATE TABLE IF NOT EXISTS customer.customers ( + customer_id INT PRIMARY KEY, + name TEXT, + credit_limit INT, + credit_total INT +); + +CREATE NAMESPACE IF NOT EXISTS "order"; + +CREATE TABLE IF NOT EXISTS "order".orders ( + customer_id INT, + timestamp BIGINT, + order_id TEXT, + PRIMARY KEY (customer_id, timestamp) +); + +CREATE INDEX IF NOT EXISTS ON "order".orders (order_id); + +CREATE TABLE IF NOT EXISTS "order".statements ( + order_id TEXT, + item_id INT, + count INT, + PRIMARY KEY (order_id, item_id) +); + +CREATE TABLE IF NOT EXISTS "order".items ( + item_id INT PRIMARY KEY, + name TEXT, + price INT +); +``` + +All the tables are created in the `customer` and `order` namespaces. + +- `customer.customers`: a table that manages customers' information + - `credit_limit`: the maximum amount of money a lender will allow each customer to spend when using a credit card + - `credit_total`: the amount of money that each customer has already spent by using the credit card +- `order.orders`: a table that manages order information +- `order.statements`: a table that manages order statement information +- `order.items`: a table that manages information about items to be ordered + +The Entity Relationship Diagram for the schema is as follows: + +![ERD](images/ERD.png) + +### Transactions + +The following five transactions are implemented in this sample application: + +1. Getting customer information +2. Placing an order by credit card (checks if the cost of the order is below the credit limit, then records order history and updates the `credit_total` if the check passes) +3. Getting order information by order ID +4. Getting order information by customer ID +5.
Repayment (reduces the amount in the `credit_total`) + +## Configuration + +Configurations for the sample Spring Boot application are as follows: + +```application.properties +spring.datasource.driver-class-name=com.scalar.db.sql.jdbc.SqlJdbcDriver +spring.datasource.url=jdbc:scalardb:\ +?scalar.db.sql.connection_mode=direct\ +&scalar.db.storage=multi-storage\ +&scalar.db.multi_storage.storages=cassandra,mysql\ +&scalar.db.multi_storage.storages.cassandra.storage=cassandra\ +&scalar.db.multi_storage.storages.cassandra.contact_points=localhost\ +&scalar.db.multi_storage.storages.cassandra.username=cassandra\ +&scalar.db.multi_storage.storages.cassandra.password=cassandra\ +&scalar.db.multi_storage.storages.mysql.storage=jdbc\ +&scalar.db.multi_storage.storages.mysql.contact_points=jdbc:mysql://localhost:3306/\ +&scalar.db.multi_storage.storages.mysql.username=root\ +&scalar.db.multi_storage.storages.mysql.password=mysql\ +&scalar.db.multi_storage.namespace_mapping=customer:mysql,order:cassandra,coordinator:cassandra\ +&scalar.db.multi_storage.default_storage=cassandra\ +&scalar.db.consensus_commit.isolation_level=SERIALIZABLE +``` + +- `scalar.db.storage`: Specifying `multi-storage` is necessary to use Multi-storage Transactions in ScalarDB. +- `scalar.db.multi_storage.storages`: Your storage names must be defined here. +- `scalar.db.multi_storage.storages.cassandra.*`: These configurations are for the `cassandra` storage, which is one of the storage names defined in `scalar.db.multi_storage.storages`. You can configure all the `scalar.db.*` properties for the `cassandra` storage here. +- `scalar.db.multi_storage.storages.mysql.*`: These configurations are for the `mysql` storage, which is one of the storage names defined in `scalar.db.multi_storage.storages`. You can configure all the `scalar.db.*` properties for the `mysql` storage here. +- `scalar.db.multi_storage.namespace_mapping`: This configuration maps the namespaces to the storage. In this sample application, operations for `customer` namespace tables are mapped to the `mysql` storage and operations for `order` namespace tables are mapped to the `cassandra` storage. You can also define which storage is mapped for the `coordinator` namespace that is used in Consensus Commit transactions. +- `scalar.db.multi_storage.default_storage`: This configuration sets the default storage that is used for operations on unmapped namespace tables. + +For details, please see [Configuration - Multi-storage Transactions](https://github.com/scalar-labs/scalardb/blob/master/docs/multi-storage-transactions.md#configuration). + +## Setup + +### Clone the ScalarDB samples repository + +Open Terminal, then clone the ScalarDB samples repository by running the following command: + +```shell +$ git clone https://github.com/scalar-labs/scalardb-samples +``` + +Then, go to the directory with this sample by running the following command: + +```shell +$ cd scalardb-samples/spring-data-multi-storage-transaction-sample +``` + +### Start Cassandra and MySQL + +To start Cassandra and MySQL, you need to run the following `docker-compose` command: + +```shell +$ docker-compose up -d +``` + +Please note that starting the containers may take more than one minute. + +### Load schema + +You then need to apply the schema with the following command. +To download the CLI tool, `scalardb-sql-cli--all.jar`, see the [Releases](https://github.com/scalar-labs/scalardb-sql/releases) of ScalarDB SQL and download the version that you want to use. 
+ +```shell +$ java -jar scalardb-sql-cli--all.jar --config scalardb-sql.properties --file schema.sql +``` + +### Load initial data + +After the containers have started, you need to load the initial data by running the following command: + +```shell +$ ./gradlew run --args="LoadInitialData" +``` + +After the initial data has been loaded, the following records should be stored in the tables: + +- For the `customer.customers` table: + +| customer_id | name | credit_limit | credit_total | +|-------------|---------------|--------------|--------------| +| 1 | Yamada Taro | 10000 | 0 | +| 2 | Yamada Hanako | 10000 | 0 | +| 3 | Suzuki Ichiro | 10000 | 0 | + +- For the `order.items` table: + +| item_id | name | price | +|---------|--------|-------| +| 1 | Apple | 1000 | +| 2 | Orange | 2000 | +| 3 | Grape | 2500 | +| 4 | Mango | 5000 | +| 5 | Melon | 3000 | + +## Run the sample application + +Let's start with getting information about the customer whose ID is `1`: + +```shell +$ ./gradlew run --args="GetCustomerInfo 1" +... +{"customer_id":1,"name":"Yamada Taro","credit_limit":10000,"credit_total":0} +... +``` + +Then, place an order for three apples and two oranges by using customer ID `1`. Note that the order format is `<Item ID>:<Count>,<Item ID>:<Count>,...`: + +```shell +$ ./gradlew run --args="PlaceOrder 1 1:3,2:2" +... +{"order_id":"5d49eb62-fcb9-4dd2-9ae5-e714d989937f","customer_id":1,"timestamp":1677564659810} +... +``` + +You can see that running this command shows the order ID. + +Let's check the details of the order by using the order ID: + +```shell +$ ./gradlew run --args="GetOrder 5d49eb62-fcb9-4dd2-9ae5-e714d989937f" +... +{"order_id":"5d49eb62-fcb9-4dd2-9ae5-e714d989937f","timestamp":1677564659810,"customer_id":1,"customer_name":"Yamada Taro","statements":[{"item_id":1,"item_name":"Apple","price":1000,"count":3,"total":3000},{"item_id":2,"item_name":"Orange","price":2000,"count":2,"total":4000}],"total":7000} +... +``` + +Then, let's place another order and get the order history of customer ID `1`: + +```shell +$ ./gradlew run --args="PlaceOrder 1 5:1" +... +{"order_id":"ccd97d75-ee57-4393-a0bb-5230c4a8c68a","customer_id":1,"timestamp":1677564776069} +... +$ ./gradlew run --args="GetOrders 1" +... +[{"order_id":"ccd97d75-ee57-4393-a0bb-5230c4a8c68a","timestamp":1677564776069,"customer_id":1,"customer_name":"Yamada Taro","statements":[{"item_id":5,"item_name":"Melon","price":3000,"count":1,"total":3000}],"total":3000},{"order_id":"5d49eb62-fcb9-4dd2-9ae5-e714d989937f","timestamp":1677564659810,"customer_id":1,"customer_name":"Yamada Taro","statements":[{"item_id":1,"item_name":"Apple","price":1000,"count":3,"total":3000},{"item_id":2,"item_name":"Orange","price":2000,"count":2,"total":4000}],"total":7000}] +... +``` + +This order history is shown in descending order by timestamp. + +The customer's current `credit_total` is `10000`. Since the customer has now reached their `credit_limit`, which was shown when retrieving their information, they cannot place any more orders. + +```shell +$ ./gradlew run --args="GetCustomerInfo 1" +... +{"customer_id":1,"name":"Yamada Taro","credit_limit":10000,"credit_total":10000} +... +$ ./gradlew run --args="PlaceOrder 1 3:1,4:1" +... +java.lang.RuntimeException: Credit limit exceeded.
limit:10000, total:17500 + at sample.SampleService.placeOrder(SampleService.java:102) + at sample.SampleService$$FastClassBySpringCGLIB$$1123c447.invoke() + at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) + at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:793) + at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163) + at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:763) + at org.springframework.transaction.interceptor.TransactionInterceptor$1.proceedWithInvocation(TransactionInterceptor.java:123) + at org.springframework.transaction.interceptor.TransactionAspectSupport.invokeWithinTransaction(TransactionAspectSupport.java:388) + at org.springframework.transaction.interceptor.TransactionInterceptor.invoke(TransactionInterceptor.java:119) + at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) + at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:763) + at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:708) + at sample.SampleService$$EnhancerBySpringCGLIB$$1cb0cc8c.placeOrder() + at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:37) + at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:13) + at picocli.CommandLine.executeUserObject(CommandLine.java:2041) + at picocli.CommandLine.access$1500(CommandLine.java:148) + at picocli.CommandLine$RunLast.executeUserObjectOfLastSubcommandWithSameParent(CommandLine.java:2461) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2453) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2415) + at picocli.CommandLine$AbstractParseResultHandler.execute(CommandLine.java:2273) + at picocli.CommandLine$RunLast.execute(CommandLine.java:2417) + at picocli.CommandLine.execute(CommandLine.java:2170) + at sample.SampleApp.run(SampleApp.java:26) + at org.springframework.boot.SpringApplication.callRunner(SpringApplication.java:768) + at org.springframework.boot.SpringApplication.callRunners(SpringApplication.java:752) + at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) + at org.springframework.boot.SpringApplication.run(SpringApplication.java:1303) + at org.springframework.boot.SpringApplication.run(SpringApplication.java:1292) + at sample.SampleApp.main(SampleApp.java:35) +... +``` + +After making a payment, the customer will be able to place orders again. + +```shell +$ ./gradlew run --args="Repayment 1 8000" +... +$ ./gradlew run --args="GetCustomerInfo 1" +... +{"customer_id":1,"name":"Yamada Taro","credit_limit":10000,"credit_total":2000} +... +$ ./gradlew run --args="PlaceOrder 1 3:1,4:1" +... +{"order_id":"3ac4a1bf-a724-4f26-b948-9f03281a971e","customer_id":1,"timestamp":1677565028204} +... 
+``` + +## Cleanup + +To stop Cassandra and MySQL, run the following command: + +```shell +$ docker-compose down +``` diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/build.gradle b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/build.gradle new file mode 100644 index 00000000..c5cdcc6a --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/build.gradle @@ -0,0 +1,49 @@ +plugins { + id 'java' + id 'application' + id "com.diffplug.spotless" version "5.17.1" +} + +group 'org.sample' +version '1.0-SNAPSHOT' + +repositories { + mavenCentral() + maven { + url = uri("https://maven.pkg.github.com/scalar-labs/scalardb-sql") + credentials { + username = project.findProperty("gpr.user") ?: System.getenv("USERNAME") + password = project.findProperty("gpr.key") ?: System.getenv("TOKEN") + } + } +} + +dependencies { + implementation "com.scalar-labs:scalardb-sql-spring-data:3.9.0" + implementation "com.scalar-labs:scalardb-sql-direct-mode:3.9.0" + // This includes dependencies to `spring-boot-starter` and `picocli` + implementation "info.picocli:picocli-spring-boot-starter:4.7.1" + // For retry + implementation 'org.springframework.boot:spring-boot-starter-aop:2.7.12' + implementation 'org.springframework.retry:spring-retry:1.3.4' + + implementation 'com.fasterxml.jackson.core:jackson-databind:2.15.1' +} + +spotless { + java { + target 'src/*/java/**/*.java' + importOrder() + removeUnusedImports() + googleJavaFormat() + } +} + +application { + mainClassName = 'sample.SampleApp' +} + +archivesBaseName = "sample" + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/docker-compose.yml b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/docker-compose.yml new file mode 100644 index 00000000..4fcc7553 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/docker-compose.yml @@ -0,0 +1,14 @@ +version: "3.5" +services: + mysql: + image: mysql:8.0 + environment: + MYSQL_ROOT_PASSWORD: mysql + container_name: "mysql-1" + ports: + - "3306:3306" + cassandra: + image: cassandra:3.11 + container_name: "cassandra-1" + ports: + - "9042:9042" diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/gradle/wrapper/gradle-wrapper.jar b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 00000000..7454180f Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/gradle/wrapper/gradle-wrapper.jar differ diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/gradle/wrapper/gradle-wrapper.properties b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000..070cb702 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-7.6-bin.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/gradlew b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/gradlew new file mode 100755 index 
00000000..744e882e --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/gradlew @@ -0,0 +1,185 @@ +#!/usr/bin/env sh + +# +# Copyright 2015 the original author or authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MSYS* | MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin or MSYS, switch paths to Windows format before running java +if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=`expr $i + 1` + done + case $i in + 0) set -- ;; + 1) set -- "$args0" ;; + 2) set -- "$args0" "$args1" ;; + 3) set -- "$args0" "$args1" "$args2" ;; + 4) set -- "$args0" "$args1" "$args2" "$args3" ;; + 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=`save "$@"` + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +exec "$JAVACMD" "$@" diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/gradlew.bat b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/gradlew.bat new file mode 100644 index 00000000..107acd32 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/gradlew.bat @@ -0,0 +1,89 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/images/ERD.png b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/images/ERD.png new file mode 100644 index 00000000..02100437 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/images/ERD.png differ diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/images/overview.png b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/images/overview.png new file mode 100644 index 00000000..16749f3b Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/images/overview.png differ diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/scalardb-sql.properties b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/scalardb-sql.properties new file mode 100644 index 00000000..5955744f --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/scalardb-sql.properties @@ -0,0 +1,15 @@ +scalar.db.sql.connection_mode=direct + +scalar.db.storage=multi-storage +scalar.db.multi_storage.storages=cassandra,mysql +scalar.db.multi_storage.storages.cassandra.storage=cassandra +scalar.db.multi_storage.storages.cassandra.contact_points=localhost +scalar.db.multi_storage.storages.cassandra.username=cassandra +scalar.db.multi_storage.storages.cassandra.password=cassandra +scalar.db.multi_storage.storages.mysql.storage=jdbc +scalar.db.multi_storage.storages.mysql.contact_points=jdbc:mysql://localhost:3306/ +scalar.db.multi_storage.storages.mysql.username=root +scalar.db.multi_storage.storages.mysql.password=mysql +scalar.db.multi_storage.namespace_mapping=customer:mysql,order:cassandra,coordinator:cassandra +scalar.db.multi_storage.default_storage=cassandra + diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/schema.sql b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/schema.sql new file mode 100644 index 00000000..673bc795 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/schema.sql @@ -0,0 +1,34 @@ +CREATE COORDINATOR TABLES IF NOT EXIST; + +CREATE NAMESPACE IF NOT EXISTS customer; + +CREATE TABLE IF NOT EXISTS customer.customers ( + customer_id INT PRIMARY KEY, + name TEXT, + credit_limit INT, + credit_total INT +); + +CREATE NAMESPACE IF NOT EXISTS "order"; + +CREATE TABLE IF NOT EXISTS "order".orders ( + customer_id INT, + timestamp BIGINT, + order_id TEXT, + PRIMARY KEY (customer_id, timestamp) +); + +CREATE INDEX IF NOT EXISTS ON "order".orders (order_id); + +CREATE TABLE IF NOT EXISTS "order".statements ( + order_id TEXT, + item_id INT, + count INT, + PRIMARY KEY (order_id, item_id) +); + +CREATE TABLE IF NOT EXISTS "order".items ( + item_id INT PRIMARY KEY, + name TEXT, + price INT +); diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/settings.gradle b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/settings.gradle new file mode 100644 index 00000000..4845930d --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/settings.gradle @@ -0,0 +1 @@ +rootProject.name = 'spring-data-multi-storage-transaction-sample' diff --git 
a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/SampleApp.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/SampleApp.java new file mode 100644 index 00000000..45c4885f --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/SampleApp.java @@ -0,0 +1,37 @@ +package sample; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.CommandLineRunner; +import org.springframework.boot.ExitCodeGenerator; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.EnableAutoConfiguration; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.Configuration; +import picocli.CommandLine; +import picocli.CommandLine.IFactory; +import sample.command.SampleCommand; + +@Configuration +@ComponentScan +@EnableAutoConfiguration +public class SampleApp implements CommandLineRunner, ExitCodeGenerator { + private int exitCode; + + @Autowired private IFactory factory; + + @Autowired private SampleCommand sampleCommand; + + @Override + public void run(String... args) { + exitCode = new CommandLine(sampleCommand, factory).execute(args); + } + + @Override + public int getExitCode() { + return exitCode; + } + + public static void main(String[] args) { + System.exit(SpringApplication.exit(SpringApplication.run(SampleApp.class, args))); + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/SampleService.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/SampleService.java new file mode 100644 index 00000000..26a3cd4e --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/SampleService.java @@ -0,0 +1,170 @@ +package sample; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.scalar.db.sql.springdata.EnableScalarDbRepositories; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.dao.TransientDataAccessException; +import org.springframework.retry.annotation.Backoff; +import org.springframework.retry.annotation.Retryable; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; +import sample.domain.model.Customer; +import sample.domain.model.Item; +import sample.domain.model.ItemOrder; +import sample.domain.model.Order; +import sample.domain.model.OrderDetail; +import sample.domain.model.Statement; +import sample.domain.model.StatementDetail; +import sample.domain.repository.CustomerRepository; +import sample.domain.repository.ItemRepository; +import sample.domain.repository.OrderRepository; +import sample.domain.repository.StatementRepository; + +@EnableScalarDbRepositories +@Service +@Retryable( + include = TransientDataAccessException.class, + maxAttempts = 8, + backoff = @Backoff(delay = 1000, maxDelay = 8000, multiplier = 2)) +public class SampleService { + private final ObjectMapper objectMapper = new ObjectMapper(); + + @Autowired private CustomerRepository customerRepository; + @Autowired private ItemRepository itemRepository; + 
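+  // These repositories span two storages: the customer namespace is stored in MySQL and the
+  // order namespace (orders, statements, items) in Cassandra, as configured in
+  // scalardb-sql.properties, so a @Transactional method can update both storages in one
+  // ScalarDB multi-storage transaction.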
@Autowired private OrderRepository orderRepository; + @Autowired private StatementRepository statementRepository; + + @Transactional + public void loadInitialData() { + customerRepository.insertIfNotExists(new Customer(1, "Yamada Taro", 10000, 0)); + customerRepository.insertIfNotExists(new Customer(2, "Yamada Hanako", 10000, 0)); + customerRepository.insertIfNotExists(new Customer(3, "Suzuki Ichiro", 10000, 0)); + itemRepository.insertIfNotExists(new Item(1, "Apple", 1000)); + itemRepository.insertIfNotExists(new Item(2, "Orange", 2000)); + itemRepository.insertIfNotExists(new Item(3, "Grape", 2500)); + itemRepository.insertIfNotExists(new Item(4, "Mango", 5000)); + itemRepository.insertIfNotExists(new Item(5, "Melon", 3000)); + } + + private String asJson(Object obj) { + try { + return objectMapper.writeValueAsString(obj); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + } + + @Transactional + public String getCustomerInfo(int customerId) { + try { + // Retrieve the customer info for the specified customer ID from the customers table. + // Return the customer info as a JSON format. + return objectMapper.writeValueAsString(customerRepository.getById(customerId)); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + } + + @Transactional + public String placeOrder(int customerId, List itemOrders) { + String orderId = UUID.randomUUID().toString(); + Order order = new Order(orderId, customerId, System.currentTimeMillis()); + // Put the order info into the orders table + orderRepository.insert(order); + + AtomicInteger amount = new AtomicInteger(); + itemOrders.forEach( + itemOrder -> { + int itemId = itemOrder.itemId; + int count = itemOrder.count; + // Retrieve the item info from the items table + Item item = itemRepository.getById(itemId); + int cost = item.price * count; + // Put the order statement into the statements table + statementRepository.insert(new Statement(itemId, orderId, count)); + // Calculate the total amount + amount.addAndGet(cost); + }); + + Customer customer = customerRepository.getById(customerId); + int creditLimit = customer.creditLimit; + int creditTotal = customer.creditTotal; + int updatedCreditTotal = creditTotal + amount.get(); + // Check if the credit total exceeds the credit limit after payment + if (updatedCreditTotal > creditLimit) { + throw new RuntimeException( + String.format( + "Credit limit exceeded. 
limit:%d, total:%d", creditLimit, updatedCreditTotal)); + } + // Update credit_total for the customer + customerRepository.update(customer.withCreditTotal(updatedCreditTotal)); + + return asJson(order); + } + + private OrderDetail getOrderDetail(String orderId) { + // Retrieve the order info for the order ID from the orders table + Order order = orderRepository.getById(orderId); + int customerId = order.customerId; + // Retrieve the customer info for the specified customer ID from the customers table + Customer customer = customerRepository.getById(customerId); + + AtomicInteger total = new AtomicInteger(); + List statementDetails = new ArrayList<>(); + // Retrieve the order statements for the order ID from the statements table + statementRepository + .findAllByOrderId(orderId) + .forEach( + statement -> { + // Retrieve the item data from the items table + Item item = itemRepository.getById(statement.itemId); + int cost = item.price * statement.count; + statementDetails.add( + new StatementDetail(item.itemId, item.name, item.price, statement.count, cost)); + total.addAndGet(cost); + }); + + return new OrderDetail( + orderId, customerId, customer.name, order.timestamp, statementDetails, total.get()); + } + + @Transactional + public String getOrderByOrderId(String orderId) { + // Get an order JSON for the specified order ID. + // Return the order info as a JSON format. + return asJson(getOrderDetail(orderId)); + } + + @Transactional + public String getOrdersByCustomerId(int customerId) { + // Retrieve the order info for the customer ID from the orders table. + // Return the order info as a JSON format. + return asJson( + orderRepository.findAllByCustomerIdOrderByTimestampDesc(customerId).stream() + .map(order -> getOrderDetail(order.orderId)) + .collect(Collectors.toList())); + } + + @Transactional + public void repayment(int customerId, int amount) { + Customer customer = customerRepository.getById(customerId); + + int updatedCreditTotal = customer.creditTotal - amount; + + // Check if over repayment or not + if (updatedCreditTotal < 0) { + throw new RuntimeException( + String.format( + "Over repayment. 
creditTotal:%d, payment:%d", customer.creditTotal, amount)); + } + + // Reduce credit_total for the customer + customerRepository.update(customer.withCreditTotal(updatedCreditTotal)); + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/GetCustomerInfoCommand.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/GetCustomerInfoCommand.java new file mode 100644 index 00000000..9d1df327 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/GetCustomerInfoCommand.java @@ -0,0 +1,23 @@ +package sample.command; + +import java.util.concurrent.Callable; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.SampleService; + +@Component +@Command(name = "GetCustomerInfo", description = "Get customer information") +public class GetCustomerInfoCommand implements Callable { + @Autowired private SampleService sampleService; + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Override + public Integer call() { + System.out.println(sampleService.getCustomerInfo(customerId)); + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/GetOrderCommand.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/GetOrderCommand.java new file mode 100644 index 00000000..24c10d47 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/GetOrderCommand.java @@ -0,0 +1,23 @@ +package sample.command; + +import java.util.concurrent.Callable; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.SampleService; + +@Component +@Command(name = "GetOrder", description = "Get order information by order ID") +public class GetOrderCommand implements Callable { + @Autowired private SampleService sampleService; + + @Parameters(index = "0", paramLabel = "ORDER_ID", description = "order ID") + private String orderId; + + @Override + public Integer call() { + System.out.println(sampleService.getOrderByOrderId(orderId)); + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/GetOrdersCommand.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/GetOrdersCommand.java new file mode 100644 index 00000000..1ff41af8 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/GetOrdersCommand.java @@ -0,0 +1,23 @@ +package sample.command; + +import java.util.concurrent.Callable; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.SampleService; + +@Component +@Command(name = "GetOrders", description = "Get order information by customer ID") +public class GetOrdersCommand implements Callable { + @Autowired private SampleService sampleService; + + @Parameters(index = 
"0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Override + public Integer call() { + System.out.println(sampleService.getOrdersByCustomerId(customerId)); + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/LoadInitialDataCommand.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/LoadInitialDataCommand.java new file mode 100644 index 00000000..fe9cec3b --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/LoadInitialDataCommand.java @@ -0,0 +1,19 @@ +package sample.command; + +import java.util.concurrent.Callable; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; +import picocli.CommandLine.Command; +import sample.SampleService; + +@Component +@Command(name = "LoadInitialData", description = "Load initial data") +public class LoadInitialDataCommand implements Callable { + @Autowired private SampleService sampleService; + + @Override + public Integer call() { + sampleService.loadInitialData(); + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/PlaceOrderCommand.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/PlaceOrderCommand.java new file mode 100644 index 00000000..7526113f --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/PlaceOrderCommand.java @@ -0,0 +1,40 @@ +package sample.command; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.SampleService; +import sample.domain.model.ItemOrder; + +@Component +@Command(name = "PlaceOrder", description = "Place an order") +public class PlaceOrderCommand implements Callable { + @Autowired private SampleService sampleService; + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Parameters( + index = "1", + paramLabel = "ORDERS", + description = "orders. 
The format is \":,:,...\"") + private String orders; + + @Override + public Integer call() { + String[] split = orders.split(",", -1); + List itemOrders = new ArrayList<>(); + + for (String value : split) { + String[] s = value.split(":", -1); + itemOrders.add(new ItemOrder(Integer.parseInt(s[0]), Integer.parseInt(s[1]))); + } + + System.out.println(sampleService.placeOrder(customerId, itemOrders)); + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/RepaymentCommand.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/RepaymentCommand.java new file mode 100644 index 00000000..7c5a6d2b --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/RepaymentCommand.java @@ -0,0 +1,24 @@ +package sample.command; + +import java.util.concurrent.Callable; +import org.springframework.beans.factory.annotation.Autowired; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.SampleService; + +@Command(name = "Repayment", description = "Repayment") +public class RepaymentCommand implements Callable { + @Autowired private SampleService sampleService; + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Parameters(index = "1", paramLabel = "AMOUNT", description = "amount of the money for repayment") + private int amount; + + @Override + public Integer call() { + sampleService.repayment(customerId, amount); + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/SampleCommand.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/SampleCommand.java new file mode 100644 index 00000000..fb824be2 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/command/SampleCommand.java @@ -0,0 +1,34 @@ +package sample.command; + +import org.springframework.stereotype.Component; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Option; + +@Component +@Command( + name = "bin/sample", + description = "Sample application for Microservice Transaction", + subcommands = { + LoadInitialDataCommand.class, + PlaceOrderCommand.class, + GetOrderCommand.class, + GetOrdersCommand.class, + GetCustomerInfoCommand.class, + RepaymentCommand.class + }) +public class SampleCommand implements Runnable { + @Option( + names = {"-h", "--help"}, + usageHelp = true, + description = "Displays this help message and quits.", + defaultValue = "true") + private Boolean showHelp; + + @Override + public void run() { + if (showHelp) { + CommandLine.usage(this, System.out); + } + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/Customer.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/Customer.java new file mode 100644 index 00000000..13d1af9f --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/Customer.java @@ -0,0 +1,32 @@ +package sample.domain.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.springframework.data.annotation.Id; +import org.springframework.data.relational.core.mapping.Table; + 
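+// Maps to the customer.customers table defined in schema.sql; under this sample's multi-storage
+// configuration, the customer namespace is stored in MySQL.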
+@Table(schema = "customer", value = "customers") +public class Customer { + @Id + @JsonProperty("customer_id") + public final int customerId; + + @JsonProperty("name") + public final String name; + + @JsonProperty("credit_limit") + public final int creditLimit; + + @JsonProperty("credit_total") + public final int creditTotal; + + public Customer(int customerId, String name, int creditLimit, int creditTotal) { + this.customerId = customerId; + this.name = name; + this.creditLimit = creditLimit; + this.creditTotal = creditTotal; + } + + public Customer withCreditTotal(int newCreditTotal) { + return new Customer(this.customerId, this.name, this.creditLimit, newCreditTotal); + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/Item.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/Item.java new file mode 100644 index 00000000..2f075750 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/Item.java @@ -0,0 +1,24 @@ +package sample.domain.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.springframework.data.annotation.Id; +import org.springframework.data.relational.core.mapping.Table; + +@Table(schema = "order", value = "items") +public class Item { + @Id + @JsonProperty("item_id") + public final int itemId; + + @JsonProperty("name") + public final String name; + + @JsonProperty("price") + public final int price; + + public Item(int itemId, String name, int price) { + this.itemId = itemId; + this.name = name; + this.price = price; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/ItemOrder.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/ItemOrder.java new file mode 100644 index 00000000..638aec3f --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/ItemOrder.java @@ -0,0 +1,11 @@ +package sample.domain.model; + +public class ItemOrder { + public final int itemId; + public final int count; + + public ItemOrder(int itemId, int count) { + this.itemId = itemId; + this.count = count; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/Order.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/Order.java new file mode 100644 index 00000000..83586d8f --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/Order.java @@ -0,0 +1,24 @@ +package sample.domain.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.springframework.data.annotation.Id; +import org.springframework.data.relational.core.mapping.Table; + +@Table(schema = "order", value = "orders") +public class Order { + @Id + @JsonProperty("order_id") + public final String orderId; + + @JsonProperty("customer_id") + public final int customerId; + + @JsonProperty("timestamp") + public final long timestamp; + + public Order(String orderId, int customerId, long timestamp) { + this.orderId = orderId; + this.customerId = customerId; + this.timestamp = timestamp; + } +} diff --git 
a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/OrderDetail.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/OrderDetail.java new file mode 100644 index 00000000..57bba54c --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/OrderDetail.java @@ -0,0 +1,39 @@ +package sample.domain.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.List; + +public class OrderDetail { + @JsonProperty("order_id") + public final String orderId; + + @JsonProperty("timestamp") + public final long timestamp; + + @JsonProperty("customer_id") + public final int customerId; + + @JsonProperty("customer_name") + public final String customerName; + + @JsonProperty("statements") + public final List statements; + + @JsonProperty("total") + public final int total; + + public OrderDetail( + String orderId, + int customerId, + String customerName, + long timestamp, + List statements, + int total) { + this.orderId = orderId; + this.customerId = customerId; + this.customerName = customerName; + this.timestamp = timestamp; + this.statements = statements; + this.total = total; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/Statement.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/Statement.java new file mode 100644 index 00000000..67ae769f --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/Statement.java @@ -0,0 +1,26 @@ +package sample.domain.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.springframework.data.annotation.Id; +import org.springframework.data.relational.core.mapping.Table; + +@Table(schema = "order", value = "statements") +public class Statement { + // This model is actually accessed via a multi-column index, but Spring Data doesn't support it + // while @Id is always required. 
So, this @Id annotation is a dummy.
+  @Id
+  @JsonProperty("item_id")
+  public final int itemId;
+
+  @JsonProperty("order_id")
+  public final String orderId;
+
+  @JsonProperty("count")
+  public final int count;
+
+  public Statement(int itemId, String orderId, int count) {
+    this.itemId = itemId;
+    this.orderId = orderId;
+    this.count = count;
+  }
+}
diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/StatementDetail.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/StatementDetail.java
new file mode 100644
index 00000000..99c1610d
--- /dev/null
+++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/model/StatementDetail.java
@@ -0,0 +1,28 @@
+package sample.domain.model;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+public class StatementDetail {
+  @JsonProperty("item_id")
+  public final int itemId;
+
+  @JsonProperty("item_name")
+  public final String itemName;
+
+  @JsonProperty("price")
+  public final int price;
+
+  @JsonProperty("count")
+  public final int count;
+
+  @JsonProperty("total")
+  public final int total;
+
+  public StatementDetail(int itemId, String itemName, int price, int count, int total) {
+    this.itemId = itemId;
+    this.itemName = itemName;
+    this.price = price;
+    this.count = count;
+    this.total = total;
+  }
+}
diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/repository/CustomerRepository.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/repository/CustomerRepository.java
new file mode 100644
index 00000000..8258dd5a
--- /dev/null
+++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/repository/CustomerRepository.java
@@ -0,0 +1,27 @@
+package sample.domain.repository;
+
+import com.scalar.db.sql.springdata.ScalarDbRepository;
+import java.util.Optional;
+import org.springframework.stereotype.Repository;
+import org.springframework.transaction.annotation.Transactional;
+import sample.domain.model.Customer;
+
+@Transactional
+@Repository
+public interface CustomerRepository extends ScalarDbRepository<Customer, Integer> {
+
+  default Customer getById(int id) {
+    Optional<Customer> entity = findById(id);
+    if (!entity.isPresent()) {
+      // If the customer info for the specified customer ID doesn't exist, throw an exception
+      throw new RuntimeException(String.format("Customer not found. 
id:%d", id)); + } + return entity.get(); + } + + default void insertIfNotExists(Customer customer) { + if (!findById(customer.customerId).isPresent()) { + insert(customer); + } + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/repository/ItemRepository.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/repository/ItemRepository.java new file mode 100644 index 00000000..db22e2dc --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/repository/ItemRepository.java @@ -0,0 +1,26 @@ +package sample.domain.repository; + +import com.scalar.db.sql.springdata.ScalarDbRepository; +import java.util.Optional; +import org.springframework.stereotype.Repository; +import org.springframework.transaction.annotation.Transactional; +import sample.domain.model.Item; + +@Transactional +@Repository +public interface ItemRepository extends ScalarDbRepository { + + default Item getById(int id) { + Optional entity = findById(id); + if (!entity.isPresent()) { + throw new RuntimeException(String.format("Item not found. id:%d", id)); + } + return entity.get(); + } + + default void insertIfNotExists(Item item) { + if (!findById(item.itemId).isPresent()) { + insert(item); + } + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/repository/OrderRepository.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/repository/OrderRepository.java new file mode 100644 index 00000000..2b31505e --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/repository/OrderRepository.java @@ -0,0 +1,23 @@ +package sample.domain.repository; + +import com.scalar.db.sql.springdata.ScalarDbRepository; +import java.util.List; +import java.util.Optional; +import org.springframework.stereotype.Repository; +import org.springframework.transaction.annotation.Transactional; +import sample.domain.model.Order; + +@Transactional +@Repository +public interface OrderRepository extends ScalarDbRepository { + + default Order getById(String id) { + Optional entity = findById(id); + if (!entity.isPresent()) { + throw new RuntimeException(String.format("Order not found. 
id:%s", id)); + } + return entity.get(); + } + + List findAllByCustomerIdOrderByTimestampDesc(int customerId); +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/repository/StatementRepository.java b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/repository/StatementRepository.java new file mode 100644 index 00000000..72fec45f --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/java/sample/domain/repository/StatementRepository.java @@ -0,0 +1,14 @@ +package sample.domain.repository; + +import com.scalar.db.sql.springdata.ScalarDbRepository; +import java.util.List; +import org.springframework.stereotype.Repository; +import org.springframework.transaction.annotation.Transactional; +import sample.domain.model.Statement; + +@Transactional +@Repository +public interface StatementRepository extends ScalarDbRepository { + + List findAllByOrderId(String orderId); +} diff --git a/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/resources/application.properties b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/resources/application.properties new file mode 100644 index 00000000..8756957a --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-multi-storage-transaction-sample/src/main/resources/application.properties @@ -0,0 +1,18 @@ +# Suppress warnings (e.g. "You listed localhost/127.0.0.1:9042 in your contact points, but it wasn't found in the control host's system.peers at startup") from com.datastax.driver +logging.level.root=ERROR +spring.datasource.driver-class-name=com.scalar.db.sql.jdbc.SqlJdbcDriver +spring.datasource.url=jdbc:scalardb:\ +?scalar.db.sql.connection_mode=direct\ +&scalar.db.storage=multi-storage\ +&scalar.db.multi_storage.storages=cassandra,mysql\ +&scalar.db.multi_storage.storages.cassandra.storage=cassandra\ +&scalar.db.multi_storage.storages.cassandra.contact_points=localhost\ +&scalar.db.multi_storage.storages.cassandra.username=cassandra\ +&scalar.db.multi_storage.storages.cassandra.password=cassandra\ +&scalar.db.multi_storage.storages.mysql.storage=jdbc\ +&scalar.db.multi_storage.storages.mysql.contact_points=jdbc:mysql://localhost:3306/\ +&scalar.db.multi_storage.storages.mysql.username=root\ +&scalar.db.multi_storage.storages.mysql.password=mysql\ +&scalar.db.multi_storage.namespace_mapping=customer:mysql,order:cassandra,coordinator:cassandra\ +&scalar.db.multi_storage.default_storage=cassandra\ +&scalar.db.consensus_commit.isolation_level=SERIALIZABLE diff --git a/docs/3.12/scalardb-samples/spring-data-sample/README.md b/docs/3.12/scalardb-samples/spring-data-sample/README.md new file mode 100644 index 00000000..db9ce7a2 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/README.md @@ -0,0 +1,292 @@ +# Sample application of Spring Data JDBC for ScalarDB + +This tutorial describes how to create a sample Spring Boot application by using Spring Data JDBC for ScalarDB. + +## Prerequisites + +- Java (OpenJDK 8 or higher) +- Gradle +- Docker, Docker Compose + +In addition, you need access to the [ScalarDB SQL GitHub repository](https://github.com/scalar-labs/scalardb-sql) and [Packages in ScalarDB SQL repository](https://github.com/orgs/scalar-labs/packages?repo_name=scalardb-sql). +These repositories are available only to users with a commercial license and permission. 
+To get a license and permission, please [contact us](https://www.scalar-labs.com/contact/).
+
+You also need the `gpr.user` property for your GitHub username and the `gpr.key` property for your personal access token.
+You must either add these properties in `~/.gradle/gradle.properties` or specify the properties by using the `-P` option when running the `./gradlew` command as follows:
+
+```shell
+$ ./gradlew run ... -Pgpr.user=<your GitHub username> -Pgpr.key=<your personal access token>
+```
+
+Alternatively, you can use the environment variables `USERNAME` for your GitHub username and `TOKEN` for your personal access token.
+
+```shell
+$ export USERNAME=<your GitHub username>
+$ export TOKEN=<your personal access token>
+```
+
+For more details, see [Install - ScalarDB SQL](https://github.com/scalar-labs/scalardb-sql#install).
+
+## Sample application
+
+### Overview
+
+This tutorial describes how to create a sample Spring Boot application for the same use case as [ScalarDB Sample](https://github.com/scalar-labs/scalardb-samples/tree/main/scalardb-sample) but by using Spring Data JDBC for ScalarDB.
+Please note that application-specific error handling, authentication processing, etc. are omitted in the sample application since this tutorial focuses on explaining how to use Spring Data JDBC for ScalarDB.
+For details, please see [Guide of Spring Data JDBC for ScalarDB](https://github.com/scalar-labs/scalardb-sql/blob/main/docs/spring-data-guide.md).
+
+### Schema
+
+[The schema](schema.sql) is as follows:
+
+```sql
+CREATE COORDINATOR TABLES IF NOT EXIST;
+
+CREATE NAMESPACE IF NOT EXISTS sample;
+
+CREATE TABLE IF NOT EXISTS sample.customers (
+  customer_id INT PRIMARY KEY,
+  name TEXT,
+  credit_limit INT,
+  credit_total INT
+);
+
+CREATE TABLE IF NOT EXISTS sample.orders (
+  customer_id INT,
+  timestamp BIGINT,
+  order_id TEXT,
+  PRIMARY KEY (customer_id, timestamp)
+);
+
+CREATE INDEX IF NOT EXISTS ON sample.orders (order_id);
+
+CREATE TABLE IF NOT EXISTS sample.statements (
+  order_id TEXT,
+  item_id INT,
+  count INT,
+  PRIMARY KEY (order_id, item_id)
+);
+
+CREATE TABLE IF NOT EXISTS sample.items (
+  item_id INT PRIMARY KEY,
+  name TEXT,
+  price INT
+);
+```
+
+All the tables are created in the `sample` namespace.
+
+- `sample.customers`: a table that manages customers' information
+  - `credit_limit`: the maximum amount of money a lender will allow each customer to spend when using a credit card
+  - `credit_total`: the amount of money that each customer has already spent by using the credit card
+- `sample.orders`: a table that manages order information
+- `sample.statements`: a table that manages order statement information
+- `sample.items`: a table that manages information about items to be ordered
+
+The Entity Relationship Diagram for the schema is as follows:
+
+![ERD](images/ERD.png)
+
+### Transactions
+
+The following five transactions are implemented in this sample application:
+
+1. Getting customer information
+2. Placing an order by credit card (checks if the cost of the order is below the credit limit, then records order history and updates the `credit_total` if the check passes)
+3. Getting order information by order ID
+4. Getting order information by customer ID
+5. 
Repayment (reduces the amount in the `credit_total`) + +## Configuration + +Configurations for the sample Spring Boot application are as follows: + +```application.properties +spring.datasource.driver-class-name=com.scalar.db.sql.jdbc.SqlJdbcDriver +spring.datasource.url=jdbc:scalardb:\ +?scalar.db.sql.connection_mode=direct\ +&scalar.db.storage=cassandra\ +&scalar.db.contact_points=localhost\ +&scalar.db.username=cassandra\ +&scalar.db.password=cassandra\ +&scalar.db.consensus_commit.isolation_level=SERIALIZABLE\ +&scalar.db.sql.default_namespace_name=sample +``` + +Since this sample application uses Cassandra, as shown above, you need to configure your settings for Cassandra in this configuration. + +## Setup + +### Clone the ScalarDB samples repository + +Open Terminal, then clone the ScalarDB samples repository by running the following command: + +```shell +$ git clone https://github.com/scalar-labs/scalardb-samples +``` + +Then, go to the directory with this sample by running the following command: + +```shell +$ cd scalardb-samples/spring-data-sample +``` + +### Start Cassandra + +To start Cassandra, you need to run the following `docker-compose` command: + +```shell +$ docker-compose up -d +``` + +Please note that starting the containers may take more than one minute. + +### Load schema + +You then need to apply the schema with the following command. +To download the CLI tool, `scalardb-sql-cli--all.jar`, see the [Releases](https://github.com/scalar-labs/scalardb-sql/releases) of ScalarDB SQL and download the version that you want to use. + +```shell +$ java -jar scalardb-sql-cli--all.jar --config scalardb-sql.properties --file schema.sql +``` + +### Load initial data + +After the containers have started, you need to load the initial data by running the following command: + +```shell +$ ./gradlew run --args="LoadInitialData" +``` + +After the initial data has loaded, the following records should be stored in the tables: + +- For the `sample.customers` table: + +| customer_id | name | credit_limit | credit_total | +|-------------|---------------|--------------|--------------| +| 1 | Yamada Taro | 10000 | 0 | +| 2 | Yamada Hanako | 10000 | 0 | +| 3 | Suzuki Ichiro | 10000 | 0 | + +- For the `sample.items` table: + +| item_id | name | price | +|---------|--------|-------| +| 1 | Apple | 1000 | +| 2 | Orange | 2000 | +| 3 | Grape | 2500 | +| 4 | Mango | 5000 | +| 5 | Melon | 3000 | + +## Run the sample application + +Let's start with getting information about the customer whose ID is `1`: + +```shell +$ ./gradlew run --args="GetCustomerInfo 1" +... +{"customer_id":1,"name":"Yamada Taro","credit_limit":10000,"credit_total":0} +... +``` + +Then, place an order for three apples and two oranges by using customer ID `1`. Note that the order format is `:,:,...`: + +```shell +$ ./gradlew run --args="PlaceOrder 1 1:3,2:2" +... +{"order_id":"2358ab35-5819-4f8f-acb1-12e73d97d34e","customer_id":1,"timestamp":1677478005400} +... +``` + +You can see that running this command shows the order ID. + +Let's check the details of the order by using the order ID: + +```shell +$ ./gradlew run --args="GetOrder 2358ab35-5819-4f8f-acb1-12e73d97d34e" +... +{"order_id":"2358ab35-5819-4f8f-acb1-12e73d97d34e","timestamp":1677478005400,"customer_id":1,"customer_name":"Yamada Taro","statements":[{"item_id":1,"item_name":"Apple","price":1000,"count":3,"total":3000},{"item_id":2,"item_name":"Orange","price":2000,"count":2,"total":4000}],"total":7000} +... 
+``` + +Then, let's place another order and get the order history of customer ID `1`: + +```shell +$ ./gradlew run --args="PlaceOrder 1 5:1" +... +{"order_id":"46062b16-b71b-46f9-a9ff-dc6b0991259b","customer_id":1,"timestamp":1677478201428} +... +$ ./gradlew run --args="GetOrders 1" +... +[{"order_id":"46062b16-b71b-46f9-a9ff-dc6b0991259b","timestamp":1677478201428,"customer_id":1,"customer_name":"Yamada Taro","statements":[{"item_id":5,"item_name":"Melon","price":3000,"count":1,"total":3000}],"total":3000},{"order_id":"2358ab35-5819-4f8f-acb1-12e73d97d34e","timestamp":1677478005400,"customer_id":1,"customer_name":"Yamada Taro","statements":[{"item_id":1,"item_name":"Apple","price":1000,"count":3,"total":3000},{"item_id":2,"item_name":"Orange","price":2000,"count":2,"total":4000}],"total":7000}] +... +``` + +This order history is shown in descending order by timestamp. + +The customer's current `credit_total` is `10000`. Since the customer has now reached their `credit_limit`, which was shown when retrieving their information, they cannot place anymore orders. + +```shell +$ ./gradlew run --args="GetCustomerInfo 1" +... +{"id": 1, "name": "Yamada Taro", "credit_limit": 10000, "credit_total": 10000} +... +$ ./gradlew run --args="PlaceOrder 1 3:1,4:1" +... +java.lang.RuntimeException: Credit limit exceeded. limit:10000, total:17500 + at sample.SampleService.placeOrder(SampleService.java:102) + at sample.SampleService$$FastClassBySpringCGLIB$$1123c447.invoke() + at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) + at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:793) + at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163) + at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:763) + at org.springframework.transaction.interceptor.TransactionInterceptor$1.proceedWithInvocation(TransactionInterceptor.java:123) + at org.springframework.transaction.interceptor.TransactionAspectSupport.invokeWithinTransaction(TransactionAspectSupport.java:388) + at org.springframework.transaction.interceptor.TransactionInterceptor.invoke(TransactionInterceptor.java:119) + at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) + at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.proceed(CglibAopProxy.java:763) + at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:708) + at sample.SampleService$$EnhancerBySpringCGLIB$$a94e1d9.placeOrder() + at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:37) + at sample.command.PlaceOrderCommand.call(PlaceOrderCommand.java:13) + at picocli.CommandLine.executeUserObject(CommandLine.java:2041) + at picocli.CommandLine.access$1500(CommandLine.java:148) + at picocli.CommandLine$RunLast.executeUserObjectOfLastSubcommandWithSameParent(CommandLine.java:2461) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2453) + at picocli.CommandLine$RunLast.handle(CommandLine.java:2415) + at picocli.CommandLine$AbstractParseResultHandler.execute(CommandLine.java:2273) + at picocli.CommandLine$RunLast.execute(CommandLine.java:2417) + at picocli.CommandLine.execute(CommandLine.java:2170) + at sample.SampleApp.run(SampleApp.java:26) + at org.springframework.boot.SpringApplication.callRunner(SpringApplication.java:768) + at 
org.springframework.boot.SpringApplication.callRunners(SpringApplication.java:752) + at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) + at org.springframework.boot.SpringApplication.run(SpringApplication.java:1303) + at org.springframework.boot.SpringApplication.run(SpringApplication.java:1292) + at sample.SampleApp.main(SampleApp.java:35) +... +``` + +After making a payment, the customer will be able to place orders again. + +```shell +$ ./gradlew run --args="Repayment 1 8000" +... +$ ./gradlew run --args="GetCustomerInfo 1" +... +{"customer_id":1,"name":"Yamada Taro","credit_limit":10000,"credit_total":2000} +... +$ ./gradlew run --args="PlaceOrder 1 3:1,4:1" +... +{"order_id":"0350947a-9003-46f2-870e-6aa4b2df0f1f","customer_id":1,"timestamp":1677478728134} +... +``` + +## Cleanup + +To stop Cassandra, run the following command: + +```shell +$ docker-compose down +``` diff --git a/docs/3.12/scalardb-samples/spring-data-sample/build.gradle b/docs/3.12/scalardb-samples/spring-data-sample/build.gradle new file mode 100644 index 00000000..c5cdcc6a --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/build.gradle @@ -0,0 +1,49 @@ +plugins { + id 'java' + id 'application' + id "com.diffplug.spotless" version "5.17.1" +} + +group 'org.sample' +version '1.0-SNAPSHOT' + +repositories { + mavenCentral() + maven { + url = uri("https://maven.pkg.github.com/scalar-labs/scalardb-sql") + credentials { + username = project.findProperty("gpr.user") ?: System.getenv("USERNAME") + password = project.findProperty("gpr.key") ?: System.getenv("TOKEN") + } + } +} + +dependencies { + implementation "com.scalar-labs:scalardb-sql-spring-data:3.9.0" + implementation "com.scalar-labs:scalardb-sql-direct-mode:3.9.0" + // This includes dependencies to `spring-boot-starter` and `picocli` + implementation "info.picocli:picocli-spring-boot-starter:4.7.1" + // For retry + implementation 'org.springframework.boot:spring-boot-starter-aop:2.7.12' + implementation 'org.springframework.retry:spring-retry:1.3.4' + + implementation 'com.fasterxml.jackson.core:jackson-databind:2.15.1' +} + +spotless { + java { + target 'src/*/java/**/*.java' + importOrder() + removeUnusedImports() + googleJavaFormat() + } +} + +application { + mainClassName = 'sample.SampleApp' +} + +archivesBaseName = "sample" + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 diff --git a/docs/3.12/scalardb-samples/spring-data-sample/docker-compose.yml b/docs/3.12/scalardb-samples/spring-data-sample/docker-compose.yml new file mode 100644 index 00000000..2eb12b92 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/docker-compose.yml @@ -0,0 +1,7 @@ +version: "3.5" +services: + cassandra: + image: cassandra:3.11 + container_name: "cassandra-1" + ports: + - "9042:9042" diff --git a/docs/3.12/scalardb-samples/spring-data-sample/gradle/wrapper/gradle-wrapper.jar b/docs/3.12/scalardb-samples/spring-data-sample/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 00000000..7454180f Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-sample/gradle/wrapper/gradle-wrapper.jar differ diff --git a/docs/3.12/scalardb-samples/spring-data-sample/gradle/wrapper/gradle-wrapper.properties b/docs/3.12/scalardb-samples/spring-data-sample/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000..070cb702 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ +distributionBase=GRADLE_USER_HOME 
+distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-7.6-bin.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/docs/3.12/scalardb-samples/spring-data-sample/gradlew b/docs/3.12/scalardb-samples/spring-data-sample/gradlew new file mode 100755 index 00000000..744e882e --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/gradlew @@ -0,0 +1,185 @@ +#!/usr/bin/env sh + +# +# Copyright 2015 the original author or authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MSYS* | MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin or MSYS, switch paths to Windows format before running java +if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=`expr $i + 1` + done + case $i in + 0) set -- ;; + 1) set -- "$args0" ;; + 2) set -- "$args0" "$args1" ;; + 3) set -- "$args0" "$args1" "$args2" ;; + 4) set -- "$args0" "$args1" "$args2" "$args3" ;; + 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=`save "$@"` + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +exec "$JAVACMD" "$@" diff --git a/docs/3.12/scalardb-samples/spring-data-sample/gradlew.bat b/docs/3.12/scalardb-samples/spring-data-sample/gradlew.bat new file mode 100644 index 00000000..107acd32 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/gradlew.bat @@ -0,0 +1,89 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/docs/3.12/scalardb-samples/spring-data-sample/images/ERD.png b/docs/3.12/scalardb-samples/spring-data-sample/images/ERD.png new file mode 100644 index 00000000..1a6d13c5 Binary files /dev/null and b/docs/3.12/scalardb-samples/spring-data-sample/images/ERD.png differ diff --git a/docs/3.12/scalardb-samples/spring-data-sample/scalardb-sql.properties b/docs/3.12/scalardb-samples/spring-data-sample/scalardb-sql.properties new file mode 100644 index 00000000..5461f78f --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/scalardb-sql.properties @@ -0,0 +1,6 @@ +scalar.db.sql.connection_mode=direct + +scalar.db.storage=cassandra +scalar.db.contact_points=localhost +scalar.db.username=cassandra +scalar.db.password=cassandra diff --git a/docs/3.12/scalardb-samples/spring-data-sample/schema.sql b/docs/3.12/scalardb-samples/spring-data-sample/schema.sql new file mode 100644 index 00000000..d3ad7fc5 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/schema.sql @@ -0,0 +1,32 @@ +CREATE COORDINATOR TABLES IF NOT EXIST; + +CREATE NAMESPACE IF NOT EXISTS sample; + +CREATE TABLE IF NOT EXISTS sample.customers ( + customer_id INT PRIMARY KEY, + name TEXT, + credit_limit INT, + credit_total INT +); + +CREATE TABLE IF NOT EXISTS sample.orders ( + customer_id INT, + timestamp BIGINT, + order_id TEXT, + PRIMARY KEY (customer_id, timestamp) +); + +CREATE INDEX IF NOT EXISTS ON sample.orders (order_id); + +CREATE TABLE IF NOT EXISTS sample.statements ( + order_id TEXT, + item_id INT, + count INT, + PRIMARY KEY (order_id, item_id) +); + +CREATE TABLE IF NOT EXISTS sample.items ( + item_id INT PRIMARY KEY, + name TEXT, + price INT +); diff --git a/docs/3.12/scalardb-samples/spring-data-sample/settings.gradle b/docs/3.12/scalardb-samples/spring-data-sample/settings.gradle new file mode 100644 index 00000000..7fae624d --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/settings.gradle @@ -0,0 +1 @@ +rootProject.name = 'spring-data-sample' diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/SampleApp.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/SampleApp.java new file mode 100644 index 00000000..45c4885f --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/SampleApp.java @@ -0,0 +1,37 @@ +package sample; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.CommandLineRunner; +import org.springframework.boot.ExitCodeGenerator; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.EnableAutoConfiguration; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.Configuration; +import picocli.CommandLine; +import picocli.CommandLine.IFactory; +import sample.command.SampleCommand; + +@Configuration +@ComponentScan +@EnableAutoConfiguration +public class SampleApp implements CommandLineRunner, ExitCodeGenerator { + private int exitCode; + + @Autowired private IFactory factory; + + @Autowired private SampleCommand sampleCommand; + + @Override + public void run(String... 
args) { + exitCode = new CommandLine(sampleCommand, factory).execute(args); + } + + @Override + public int getExitCode() { + return exitCode; + } + + public static void main(String[] args) { + System.exit(SpringApplication.exit(SpringApplication.run(SampleApp.class, args))); + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/SampleService.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/SampleService.java new file mode 100644 index 00000000..26a3cd4e --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/SampleService.java @@ -0,0 +1,170 @@ +package sample; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.scalar.db.sql.springdata.EnableScalarDbRepositories; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.dao.TransientDataAccessException; +import org.springframework.retry.annotation.Backoff; +import org.springframework.retry.annotation.Retryable; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; +import sample.domain.model.Customer; +import sample.domain.model.Item; +import sample.domain.model.ItemOrder; +import sample.domain.model.Order; +import sample.domain.model.OrderDetail; +import sample.domain.model.Statement; +import sample.domain.model.StatementDetail; +import sample.domain.repository.CustomerRepository; +import sample.domain.repository.ItemRepository; +import sample.domain.repository.OrderRepository; +import sample.domain.repository.StatementRepository; + +@EnableScalarDbRepositories +@Service +@Retryable( + include = TransientDataAccessException.class, + maxAttempts = 8, + backoff = @Backoff(delay = 1000, maxDelay = 8000, multiplier = 2)) +public class SampleService { + private final ObjectMapper objectMapper = new ObjectMapper(); + + @Autowired private CustomerRepository customerRepository; + @Autowired private ItemRepository itemRepository; + @Autowired private OrderRepository orderRepository; + @Autowired private StatementRepository statementRepository; + + @Transactional + public void loadInitialData() { + customerRepository.insertIfNotExists(new Customer(1, "Yamada Taro", 10000, 0)); + customerRepository.insertIfNotExists(new Customer(2, "Yamada Hanako", 10000, 0)); + customerRepository.insertIfNotExists(new Customer(3, "Suzuki Ichiro", 10000, 0)); + itemRepository.insertIfNotExists(new Item(1, "Apple", 1000)); + itemRepository.insertIfNotExists(new Item(2, "Orange", 2000)); + itemRepository.insertIfNotExists(new Item(3, "Grape", 2500)); + itemRepository.insertIfNotExists(new Item(4, "Mango", 5000)); + itemRepository.insertIfNotExists(new Item(5, "Melon", 3000)); + } + + private String asJson(Object obj) { + try { + return objectMapper.writeValueAsString(obj); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + } + + @Transactional + public String getCustomerInfo(int customerId) { + try { + // Retrieve the customer info for the specified customer ID from the customers table. + // Return the customer info as a JSON format. 
+ return objectMapper.writeValueAsString(customerRepository.getById(customerId)); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + } + + @Transactional + public String placeOrder(int customerId, List itemOrders) { + String orderId = UUID.randomUUID().toString(); + Order order = new Order(orderId, customerId, System.currentTimeMillis()); + // Put the order info into the orders table + orderRepository.insert(order); + + AtomicInteger amount = new AtomicInteger(); + itemOrders.forEach( + itemOrder -> { + int itemId = itemOrder.itemId; + int count = itemOrder.count; + // Retrieve the item info from the items table + Item item = itemRepository.getById(itemId); + int cost = item.price * count; + // Put the order statement into the statements table + statementRepository.insert(new Statement(itemId, orderId, count)); + // Calculate the total amount + amount.addAndGet(cost); + }); + + Customer customer = customerRepository.getById(customerId); + int creditLimit = customer.creditLimit; + int creditTotal = customer.creditTotal; + int updatedCreditTotal = creditTotal + amount.get(); + // Check if the credit total exceeds the credit limit after payment + if (updatedCreditTotal > creditLimit) { + throw new RuntimeException( + String.format( + "Credit limit exceeded. limit:%d, total:%d", creditLimit, updatedCreditTotal)); + } + // Update credit_total for the customer + customerRepository.update(customer.withCreditTotal(updatedCreditTotal)); + + return asJson(order); + } + + private OrderDetail getOrderDetail(String orderId) { + // Retrieve the order info for the order ID from the orders table + Order order = orderRepository.getById(orderId); + int customerId = order.customerId; + // Retrieve the customer info for the specified customer ID from the customers table + Customer customer = customerRepository.getById(customerId); + + AtomicInteger total = new AtomicInteger(); + List statementDetails = new ArrayList<>(); + // Retrieve the order statements for the order ID from the statements table + statementRepository + .findAllByOrderId(orderId) + .forEach( + statement -> { + // Retrieve the item data from the items table + Item item = itemRepository.getById(statement.itemId); + int cost = item.price * statement.count; + statementDetails.add( + new StatementDetail(item.itemId, item.name, item.price, statement.count, cost)); + total.addAndGet(cost); + }); + + return new OrderDetail( + orderId, customerId, customer.name, order.timestamp, statementDetails, total.get()); + } + + @Transactional + public String getOrderByOrderId(String orderId) { + // Get an order JSON for the specified order ID. + // Return the order info as a JSON format. + return asJson(getOrderDetail(orderId)); + } + + @Transactional + public String getOrdersByCustomerId(int customerId) { + // Retrieve the order info for the customer ID from the orders table. + // Return the order info as a JSON format. + return asJson( + orderRepository.findAllByCustomerIdOrderByTimestampDesc(customerId).stream() + .map(order -> getOrderDetail(order.orderId)) + .collect(Collectors.toList())); + } + + @Transactional + public void repayment(int customerId, int amount) { + Customer customer = customerRepository.getById(customerId); + + int updatedCreditTotal = customer.creditTotal - amount; + + // Check if over repayment or not + if (updatedCreditTotal < 0) { + throw new RuntimeException( + String.format( + "Over repayment. 
creditTotal:%d, payment:%d", customer.creditTotal, amount)); + } + + // Reduce credit_total for the customer + customerRepository.update(customer.withCreditTotal(updatedCreditTotal)); + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/GetCustomerInfoCommand.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/GetCustomerInfoCommand.java new file mode 100644 index 00000000..9d1df327 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/GetCustomerInfoCommand.java @@ -0,0 +1,23 @@ +package sample.command; + +import java.util.concurrent.Callable; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.SampleService; + +@Component +@Command(name = "GetCustomerInfo", description = "Get customer information") +public class GetCustomerInfoCommand implements Callable { + @Autowired private SampleService sampleService; + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Override + public Integer call() { + System.out.println(sampleService.getCustomerInfo(customerId)); + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/GetOrderCommand.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/GetOrderCommand.java new file mode 100644 index 00000000..24c10d47 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/GetOrderCommand.java @@ -0,0 +1,23 @@ +package sample.command; + +import java.util.concurrent.Callable; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.SampleService; + +@Component +@Command(name = "GetOrder", description = "Get order information by order ID") +public class GetOrderCommand implements Callable { + @Autowired private SampleService sampleService; + + @Parameters(index = "0", paramLabel = "ORDER_ID", description = "order ID") + private String orderId; + + @Override + public Integer call() { + System.out.println(sampleService.getOrderByOrderId(orderId)); + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/GetOrdersCommand.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/GetOrdersCommand.java new file mode 100644 index 00000000..1ff41af8 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/GetOrdersCommand.java @@ -0,0 +1,23 @@ +package sample.command; + +import java.util.concurrent.Callable; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.SampleService; + +@Component +@Command(name = "GetOrders", description = "Get order information by customer ID") +public class GetOrdersCommand implements Callable { + @Autowired private SampleService sampleService; + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Override + public Integer call() { + System.out.println(sampleService.getOrdersByCustomerId(customerId)); + return 0; + } +} diff --git 
a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/LoadInitialDataCommand.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/LoadInitialDataCommand.java new file mode 100644 index 00000000..fe9cec3b --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/LoadInitialDataCommand.java @@ -0,0 +1,19 @@ +package sample.command; + +import java.util.concurrent.Callable; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; +import picocli.CommandLine.Command; +import sample.SampleService; + +@Component +@Command(name = "LoadInitialData", description = "Load initial data") +public class LoadInitialDataCommand implements Callable { + @Autowired private SampleService sampleService; + + @Override + public Integer call() { + sampleService.loadInitialData(); + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/PlaceOrderCommand.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/PlaceOrderCommand.java new file mode 100644 index 00000000..7526113f --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/PlaceOrderCommand.java @@ -0,0 +1,40 @@ +package sample.command; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.SampleService; +import sample.domain.model.ItemOrder; + +@Component +@Command(name = "PlaceOrder", description = "Place an order") +public class PlaceOrderCommand implements Callable { + @Autowired private SampleService sampleService; + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Parameters( + index = "1", + paramLabel = "ORDERS", + description = "orders. 
The format is \":,:,...\"") + private String orders; + + @Override + public Integer call() { + String[] split = orders.split(",", -1); + List itemOrders = new ArrayList<>(); + + for (String value : split) { + String[] s = value.split(":", -1); + itemOrders.add(new ItemOrder(Integer.parseInt(s[0]), Integer.parseInt(s[1]))); + } + + System.out.println(sampleService.placeOrder(customerId, itemOrders)); + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/RepaymentCommand.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/RepaymentCommand.java new file mode 100644 index 00000000..7c5a6d2b --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/RepaymentCommand.java @@ -0,0 +1,24 @@ +package sample.command; + +import java.util.concurrent.Callable; +import org.springframework.beans.factory.annotation.Autowired; +import picocli.CommandLine.Command; +import picocli.CommandLine.Parameters; +import sample.SampleService; + +@Command(name = "Repayment", description = "Repayment") +public class RepaymentCommand implements Callable { + @Autowired private SampleService sampleService; + + @Parameters(index = "0", paramLabel = "CUSTOMER_ID", description = "customer ID") + private int customerId; + + @Parameters(index = "1", paramLabel = "AMOUNT", description = "amount of the money for repayment") + private int amount; + + @Override + public Integer call() { + sampleService.repayment(customerId, amount); + return 0; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/SampleCommand.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/SampleCommand.java new file mode 100644 index 00000000..fb824be2 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/command/SampleCommand.java @@ -0,0 +1,34 @@ +package sample.command; + +import org.springframework.stereotype.Component; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Option; + +@Component +@Command( + name = "bin/sample", + description = "Sample application for Microservice Transaction", + subcommands = { + LoadInitialDataCommand.class, + PlaceOrderCommand.class, + GetOrderCommand.class, + GetOrdersCommand.class, + GetCustomerInfoCommand.class, + RepaymentCommand.class + }) +public class SampleCommand implements Runnable { + @Option( + names = {"-h", "--help"}, + usageHelp = true, + description = "Displays this help message and quits.", + defaultValue = "true") + private Boolean showHelp; + + @Override + public void run() { + if (showHelp) { + CommandLine.usage(this, System.out); + } + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/Customer.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/Customer.java new file mode 100644 index 00000000..aa28c05b --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/Customer.java @@ -0,0 +1,32 @@ +package sample.domain.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.springframework.data.annotation.Id; +import org.springframework.data.relational.core.mapping.Table; + +@Table("customers") +public class Customer { + @Id + @JsonProperty("customer_id") + public final int customerId; + + @JsonProperty("name") + public final String name; + + @JsonProperty("credit_limit") + public final int creditLimit; 
+ + @JsonProperty("credit_total") + public final int creditTotal; + + public Customer(int customerId, String name, int creditLimit, int creditTotal) { + this.customerId = customerId; + this.name = name; + this.creditLimit = creditLimit; + this.creditTotal = creditTotal; + } + + public Customer withCreditTotal(int newCreditTotal) { + return new Customer(this.customerId, this.name, this.creditLimit, newCreditTotal); + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/Item.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/Item.java new file mode 100644 index 00000000..b8a09771 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/Item.java @@ -0,0 +1,24 @@ +package sample.domain.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.springframework.data.annotation.Id; +import org.springframework.data.relational.core.mapping.Table; + +@Table("items") +public class Item { + @Id + @JsonProperty("item_id") + public final int itemId; + + @JsonProperty("name") + public final String name; + + @JsonProperty("price") + public final int price; + + public Item(int itemId, String name, int price) { + this.itemId = itemId; + this.name = name; + this.price = price; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/ItemOrder.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/ItemOrder.java new file mode 100644 index 00000000..95c1f170 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/ItemOrder.java @@ -0,0 +1,12 @@ +package sample.domain.model; + +public class ItemOrder { + + public final int itemId; + public final int count; + + public ItemOrder(int itemId, int count) { + this.itemId = itemId; + this.count = count; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/Order.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/Order.java new file mode 100644 index 00000000..ba4e08d0 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/Order.java @@ -0,0 +1,24 @@ +package sample.domain.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.springframework.data.annotation.Id; +import org.springframework.data.relational.core.mapping.Table; + +@Table("orders") +public class Order { + @Id + @JsonProperty("order_id") + public final String orderId; + + @JsonProperty("customer_id") + public final int customerId; + + @JsonProperty("timestamp") + public final long timestamp; + + public Order(String orderId, int customerId, long timestamp) { + this.orderId = orderId; + this.customerId = customerId; + this.timestamp = timestamp; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/OrderDetail.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/OrderDetail.java new file mode 100644 index 00000000..57bba54c --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/OrderDetail.java @@ -0,0 +1,39 @@ +package sample.domain.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.List; + +public class OrderDetail { + @JsonProperty("order_id") + public final String orderId; + + @JsonProperty("timestamp") + public final long timestamp; + + 
@JsonProperty("customer_id") + public final int customerId; + + @JsonProperty("customer_name") + public final String customerName; + + @JsonProperty("statements") + public final List statements; + + @JsonProperty("total") + public final int total; + + public OrderDetail( + String orderId, + int customerId, + String customerName, + long timestamp, + List statements, + int total) { + this.orderId = orderId; + this.customerId = customerId; + this.customerName = customerName; + this.timestamp = timestamp; + this.statements = statements; + this.total = total; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/Statement.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/Statement.java new file mode 100644 index 00000000..e206f11d --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/Statement.java @@ -0,0 +1,26 @@ +package sample.domain.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.springframework.data.annotation.Id; +import org.springframework.data.relational.core.mapping.Table; + +@Table("statements") +public class Statement { + // This model is actually accessed via a multi-column index, but Spring Data doesn't support it + // while @Id is always required. So, this @Id annotation is a dummy + @Id + @JsonProperty("item_id") + public final int itemId; + + @JsonProperty("order_id") + public final String orderId; + + @JsonProperty("count") + public final int count; + + public Statement(int itemId, String orderId, int count) { + this.itemId = itemId; + this.orderId = orderId; + this.count = count; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/StatementDetail.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/StatementDetail.java new file mode 100644 index 00000000..99c1610d --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/model/StatementDetail.java @@ -0,0 +1,28 @@ +package sample.domain.model; + +import com.fasterxml.jackson.annotation.JsonProperty; + +public class StatementDetail { + @JsonProperty("item_id") + public final int itemId; + + @JsonProperty("item_name") + public final String itemName; + + @JsonProperty("price") + public final int price; + + @JsonProperty("count") + public final int count; + + @JsonProperty("total") + public final int total; + + public StatementDetail(int itemId, String itemName, int price, int count, int total) { + this.itemId = itemId; + this.itemName = itemName; + this.price = price; + this.count = count; + this.total = total; + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/repository/CustomerRepository.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/repository/CustomerRepository.java new file mode 100644 index 00000000..8258dd5a --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/repository/CustomerRepository.java @@ -0,0 +1,27 @@ +package sample.domain.repository; + +import com.scalar.db.sql.springdata.ScalarDbRepository; +import java.util.Optional; +import org.springframework.stereotype.Repository; +import org.springframework.transaction.annotation.Transactional; +import sample.domain.model.Customer; + +@Transactional +@Repository +public interface CustomerRepository extends ScalarDbRepository { + + default Customer getById(int id) { + Optional entity = 
findById(id); + if (!entity.isPresent()) { + // If the customer info the specified customer ID doesn't exist, throw an exception + throw new RuntimeException(String.format("Customer not found. id:%d", id)); + } + return entity.get(); + } + + default void insertIfNotExists(Customer customer) { + if (!findById(customer.customerId).isPresent()) { + insert(customer); + } + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/repository/ItemRepository.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/repository/ItemRepository.java new file mode 100644 index 00000000..db22e2dc --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/repository/ItemRepository.java @@ -0,0 +1,26 @@ +package sample.domain.repository; + +import com.scalar.db.sql.springdata.ScalarDbRepository; +import java.util.Optional; +import org.springframework.stereotype.Repository; +import org.springframework.transaction.annotation.Transactional; +import sample.domain.model.Item; + +@Transactional +@Repository +public interface ItemRepository extends ScalarDbRepository { + + default Item getById(int id) { + Optional entity = findById(id); + if (!entity.isPresent()) { + throw new RuntimeException(String.format("Item not found. id:%d", id)); + } + return entity.get(); + } + + default void insertIfNotExists(Item item) { + if (!findById(item.itemId).isPresent()) { + insert(item); + } + } +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/repository/OrderRepository.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/repository/OrderRepository.java new file mode 100644 index 00000000..2b31505e --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/repository/OrderRepository.java @@ -0,0 +1,23 @@ +package sample.domain.repository; + +import com.scalar.db.sql.springdata.ScalarDbRepository; +import java.util.List; +import java.util.Optional; +import org.springframework.stereotype.Repository; +import org.springframework.transaction.annotation.Transactional; +import sample.domain.model.Order; + +@Transactional +@Repository +public interface OrderRepository extends ScalarDbRepository { + + default Order getById(String id) { + Optional entity = findById(id); + if (!entity.isPresent()) { + throw new RuntimeException(String.format("Order not found. 
id:%s", id)); + } + return entity.get(); + } + + List findAllByCustomerIdOrderByTimestampDesc(int customerId); +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/repository/StatementRepository.java b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/repository/StatementRepository.java new file mode 100644 index 00000000..72fec45f --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/java/sample/domain/repository/StatementRepository.java @@ -0,0 +1,14 @@ +package sample.domain.repository; + +import com.scalar.db.sql.springdata.ScalarDbRepository; +import java.util.List; +import org.springframework.stereotype.Repository; +import org.springframework.transaction.annotation.Transactional; +import sample.domain.model.Statement; + +@Transactional +@Repository +public interface StatementRepository extends ScalarDbRepository { + + List findAllByOrderId(String orderId); +} diff --git a/docs/3.12/scalardb-samples/spring-data-sample/src/main/resources/application.properties b/docs/3.12/scalardb-samples/spring-data-sample/src/main/resources/application.properties new file mode 100644 index 00000000..acef80a9 --- /dev/null +++ b/docs/3.12/scalardb-samples/spring-data-sample/src/main/resources/application.properties @@ -0,0 +1,11 @@ +# Suppress warnings (e.g. "You listed localhost/127.0.0.1:9042 in your contact points, but it wasn't found in the control host's system.peers at startup") from com.datastax.driver +logging.level.root=ERROR +spring.datasource.driver-class-name=com.scalar.db.sql.jdbc.SqlJdbcDriver +spring.datasource.url=jdbc:scalardb:\ +?scalar.db.sql.connection_mode=direct\ +&scalar.db.storage=cassandra\ +&scalar.db.contact_points=localhost\ +&scalar.db.username=cassandra\ +&scalar.db.password=cassandra\ +&scalar.db.consensus_commit.isolation_level=SERIALIZABLE\ +&scalar.db.sql.default_namespace_name=sample diff --git a/docs/3.12/scalardb-server.md b/docs/3.12/scalardb-server.md new file mode 100644 index 00000000..9c8b90ad --- /dev/null +++ b/docs/3.12/scalardb-server.md @@ -0,0 +1,165 @@ +# ScalarDB Server + +ScalarDB Server is a gRPC server that implements ScalarDB interface. +With ScalarDB Server, you can use ScalarDB features from multiple programming languages that are supported by gRPC. + +Currently, we provide only a Java client officially, and we will support other language clients officially in the future. +Of course, you can generate language-specific client stubs by yourself. +However, note that it is not necessarily straightforward to implement a client since it's using a bidirectional streaming RPC in gRPC, and you need to be familiar with it. + +This document explains how to install and use ScalarDB Server. + +## Install prerequisites + +ScalarDB Server is written in Java. So the following software is required to run it. + +* [Oracle JDK 8](https://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html) (OpenJDK 8) or higher + +## Install ScalarDB Server + +We have Docker images in [our repository](https://github.com/orgs/scalar-labs/packages/container/package/scalardb-server) and zip archives of ScalarDB Server available in [releases](https://github.com/scalar-labs/scalardb/releases). + +If you are interested in building from source, run the following command: + +```shell +$ ./gradlew installDist +``` + +Of course, you can archive the jar and libraries by `./gradlew distZip` and so on. 
+ +## Configure ScalarDB Server + +You need a property file holding the configuration for ScalarDB Server. +The property file must contain two sections: ScalarDB Server configurations and transaction manager configurations. + +```properties +# +# ScalarDB Server configurations +# + +# Port number of ScalarDB Server. The default is `60051`. +scalar.db.server.port=60051 + +# Prometheus exporter port. Prometheus exporter will not be started if a negative number is given. The default is `8080`. +scalar.db.server.prometheus_exporter_port=8080 + +# The maximum message size allowed to be received. If not specified, use the gRPC default value. +scalar.db.server.grpc.max_inbound_message_size= + +# The maximum size of metadata allowed to be received. If not specified, use the gRPC default value. +scalar.db.server.grpc.max_inbound_metadata_size= + +# The decommissioning duration in seconds. The default is `30`. +scalar.db.server.decommissioning_duration_secs=30 + +# +# Transaction manager configurations +# + +# Transaction manager implementation. The default is `consensus-commit`. +scalar.db.transaction_manager=consensus-commit + +# Storage implementation used for Consensus Commit. The default is `cassandra`. +scalar.db.storage=cassandra + +# Comma-separated contact points. +scalar.db.contact_points=localhost + +# Port number for all the contact points. +#scalar.db.contact_port= + +# Credential information to access the database. +scalar.db.username=cassandra +scalar.db.password=cassandra + +# Isolation level used for Consensus Commit. Either `SNAPSHOT` or `SERIALIZABLE` can be specified. The default is `SNAPSHOT`. +scalar.db.consensus_commit.isolation_level=SNAPSHOT + +# Serializable strategy used for Consensus Commit. +# Either `EXTRA_READ` or `EXTRA_WRITE` can be specified. The default is `EXTRA_READ`. +# If `SNAPSHOT` is specified in the property `scalar.db.consensus_commit.isolation_level`, this is ignored. +scalar.db.consensus_commit.serializable_strategy= +``` + +For details about transaction manager configurations, see [ScalarDB Configurations](configurations.md). 
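+
+The example above targets Cassandra. For comparison, the following is a minimal sketch of the transaction manager section for a JDBC database such as PostgreSQL; the property keys are the same as above, and the connection URL and credentials are placeholder values:
+
+```properties
+# Transaction manager configurations (sketch for a JDBC backend; placeholder values)
+scalar.db.transaction_manager=consensus-commit
+scalar.db.storage=jdbc
+# For the JDBC storage, the contact point is the JDBC connection URL
+scalar.db.contact_points=jdbc:postgresql://localhost:5432/scalardb
+scalar.db.username=postgres
+scalar.db.password=postgres
+```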
+ +## Start ScalarDB Server + +### Docker images + +For Docker images, you need to pull the ScalarDB Server image first: +```shell +$ docker pull ghcr.io/scalar-labs/scalardb-server: +``` + +And then, you can start ScalarDB Server with the following command: +```shell +$ docker run -v :/scalardb/server/database.properties.tmpl -d -p 60051:60051 -p 8080:8080 ghcr.io/scalar-labs/scalardb-server: +``` + +You can also start it with DEBUG logging as follows: +```shell +$ docker run -v :/scalardb/server/database.properties.tmpl -e SCALAR_DB_LOG_LEVEL=DEBUG -d -p 60051:60051 -p 8080:8080 ghcr.io/scalar-labs/scalardb-server: +```` + +You can also start it with your custom log configuration as follows: +```shell +$ docker run -v :/scalardb/server/database.properties.tmpl -v :/scalardb/server/log4j2.properties.tmpl -d -p 60051:60051 -p 8080:8080 ghcr.io/scalar-labs/scalardb-server: +``` + +You can also start it with environment variables as follows: +```shell +$ docker run --env SCALAR_DB_CONTACT_POINTS=cassandra --env SCALAR_DB_CONTACT_PORT=9042 --env SCALAR_DB_USERNAME=cassandra --env SCALAR_DB_PASSWORD=cassandra --env SCALAR_DB_STORAGE=cassandra -d -p 60051:60051 -p 8080:8080 ghcr.io/scalar-labs/scalardb-server: +``` + +You can also start it with JMX as follows: +```shell +$ docker run -v :/scalardb/server/database.properties.tmpl -e JAVA_OPTS="-Dlog4j.configurationFile=file:log4j2.properties -Djava.rmi.server.hostname= -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.port=9990 -Dcom.sun.management.jmxremote.rmi.port=9990 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" -d -p 60051:60051 -p 8080:8080 -p 9990:9990 ghcr.io/scalar-labs/scalardb-server: +``` + +### Zip archives + +For zip archives, you can start ScalarDB Server with the following commands: + +```shell +$ unzip scalardb-server-.zip +$ cd scalardb-server- +$ export JAVA_OPTS="" +$ bin/scalardb-server --config +``` + +## Usage of the Java client of ScalarDB Server + +You can use the Java client of ScalarDB Server in almost the same way as other storages/databases. +The difference is that you need to set `scalar.db.transaction_manager` to `grpc` in your client side property file. + +```properties +# Transaction manager implementation. +scalar.db.transaction_manager=grpc + +# Comma-separated contact points. +scalar.db.contact_points= + +# Port number for all the contact points. +scalar.db.contact_port=60051 + +# The deadline duration for gRPC connections. The default is `60000` milliseconds (60 seconds). +scalar.db.grpc.deadline_duration_millis=60000 + +# The maximum message size allowed for a single gRPC frame. If not specified, use the gRPC default value. +scalar.db.grpc.max_inbound_message_size= + +# The maximum size of metadata allowed to be received. If not specified, use the gRPC default value. 
+scalar.db.grpc.max_inbound_metadata_size=
+```
+
+## Further reading
+
+Please see the following sample to learn more about ScalarDB Server:
+
+- [ScalarDB Server Sample](https://github.com/scalar-labs/scalardb-samples/tree/main/scalardb-server-sample)
+
+Please also see the following documents to learn how to deploy ScalarDB Server:
+
+- [Deploy ScalarDB Server on AWS](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/ManualDeploymentGuideScalarDBServerOnEKS.md)
+- [Deploy ScalarDB Server on Azure](https://github.com/scalar-labs/scalar-kubernetes/blob/master/docs/ManualDeploymentGuideScalarDBServerOnAKS.md)
\ No newline at end of file
diff --git a/docs/3.12/scalardb-sql/add-scalardb-sql-to-your-build.md b/docs/3.12/scalardb-sql/add-scalardb-sql-to-your-build.md
new file mode 100644
index 00000000..5c0cc0dc
--- /dev/null
+++ b/docs/3.12/scalardb-sql/add-scalardb-sql-to-your-build.md
@@ -0,0 +1,154 @@
+# Add ScalarDB SQL to Your Build
+
+The ScalarDB SQL libraries are available on the [Maven Central Repository](https://mvnrepository.com/artifact/com.scalar-labs/scalardb-sql) and as [packages on GitHub](https://github.com/orgs/scalar-labs/packages?repo_name=scalardb-sql). You can add the libraries as a build dependency to your application by using Gradle or Maven.
+
+{% capture notice--warning %}
+**Attention**
+
+You must have a commercial license and permission to access the ScalarDB SQL libraries. If you need a commercial license, please [contact us](https://scalar-labs.com/contact_us/).
+{% endcapture %}
+
+<div class="notice--warning">{{ notice--warning | markdownify }}</div>
+ +## Configure your application based on your build tool + +Select your build tool, and follow the instructions to add the build dependency for ScalarDB SQL for your application. + +
+
+ + +
+ +
+ +The following instructions describe how to add the build dependency for ScalarDB SQL to your application by using Gradle. For details about using package repositories with Gradle, see [Working with the Gradle registry](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-gradle-registry). + +## Configure your GitHub credentials for Gradle +{:.no_toc} + +Before adding the build dependency for ScalarDB SQL to your application by using Gradle, you need to configure your GitHub credentials to access the package repository. + +To access the dependency on GitHub, add the following to `build.gradle` in your application: + +```gradle +repositories { + maven { + url = uri("https://maven.pkg.github.com/scalar-labs/scalardb-sql") + credentials { + username = project.findProperty("gpr.user") ?: System.getenv("USERNAME") + password = project.findProperty("gpr.key") ?: System.getenv("TOKEN") + } + } +} +``` + +To configure the `gpr.user` property for your GitHub username and the `gpr.key` property for your personal access token, do one of the following: + +- **Store your GitHub credentials as properties in `~/.gradle/gradle.properties`** + - Open `~/.gradle/gradle.properties`, and store your GitHub credentials as properties by running the following command, replacing `` with your username and `` with a personal access token: + ```shell + $ ./gradlew build -Pgpr.user= -Pgpr.key= + ``` + +- **Store your GitHub credentials as environment variables** + 1. Open a terminal window, and store your GitHub username as an environment variable by running the following command, replacing `` with your username: + ```shell + $ export USERNAME= + ``` + 1. Store your GitHub personal access token as an environment variable by running the following command, replacing `` with a personal access token: + ```shell + $ export TOKEN= + ``` + +## Add the build dependency for ScalarDB SQL by using Gradle +{:.no_toc} + +After specifying your GitHub credentials, add the following ScalarDB SQL dependency to `build.gradle` in your application, replacing `` with the version of ScalarDB SQL that you want to use: + +```gradle +dependencies { + // For Direct mode + implementation 'com.scalar-labs:scalardb-sql-direct-mode:' + + // For Server mode + implementation 'com.scalar-labs:scalardb-sql-server-mode:' +} +``` + +
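+
+If you store your credentials in `~/.gradle/gradle.properties` as described above, the file only needs the two properties that the repository block reads. This is a sketch; the values are placeholders for your own GitHub username and personal access token:
+
+```properties
+# ~/.gradle/gradle.properties (placeholder values)
+gpr.user=YOUR_GITHUB_USERNAME
+gpr.key=YOUR_PERSONAL_ACCESS_TOKEN
+```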
+
+ +The following instructions describe how to add the build dependency for ScalarDB SQL to your application by using Maven. For details about using package repositories with Maven, see [Working with the Apache Maven registry](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-apache-maven-registry) + +## Configure your GitHub credentials for Maven +{:.no_toc} + +Before adding the build dependency for ScalarDB SQL to your application by using Maven, you need to configure your GitHub credentials to access the package repository. + +To access the dependency on GitHub, add the following to `~/.m2/settings.xml` in your application, replacing `` with your username and `` with a personal access token in the child `server` in the `servers` tag: + +```xml + + + + github + + + + + github + + + central + https://repo1.maven.org/maven2 + + + github + https://maven.pkg.github.com/scalar-labs/scalardb-sql + + true + + + + + + + + + github + + + + + +``` + +## Add the build dependency for ScalarDB SQL by using Maven +{:.no_toc} + +After specifying your GitHub credentials, add the following ScalarDB SQL dependency to `pom.xml` in your application, replacing `` with the version of ScalarDB SQL that you want to use: + +```xml + + + + com.scalar-labs + scalardb-sql-direct-mode + + + + + + com.scalar-labs + scalardb-sql-server-mode + + + +``` + +
+
diff --git a/docs/3.12/scalardb-sql/command-line-interface.md b/docs/3.12/scalardb-sql/command-line-interface.md new file mode 100644 index 00000000..9d2bad1e --- /dev/null +++ b/docs/3.12/scalardb-sql/command-line-interface.md @@ -0,0 +1,59 @@ +# ScalarDB SQL Command Line Interface + +Like other SQL databases, ScalarDB SQL also provides a command-line interface tool where you can issue SQLs interactively in a command-line shell. + +This document explains how to install and use the ScalarDB SQL Command Line Interface. + +## Install + +We have Docker images in [our repository](https://github.com/orgs/scalar-labs/packages/container/package/scalardb-sql-cli) and executable jars available in [releases](https://github.com/scalar-labs/scalardb-sql/releases). + +If you are interested in building from source, run the following command: + +```shell +$ ./gradlew cli:shadowjar +``` + +The executable jar (`scalardb-sql-cli--all.jar`) is created in `cli/build/libs/`. + +## Run + +### From Docker + +You can start the command line interface with docker as follows: +```shell +$ docker pull ghcr.io/scalar-labs/scalardb-sql-cli: +$ docker run --rm -it -v :/database.properties ghcr.io/scalar-labs/scalardb-sql-cli: --config /database.properties +``` + +You can use the same configuration of ScalarDB SQL. +Please refer to [ScalarDB SQL Configurations](configurations.md) for more details on the configurations of this tool. + +### From executable jar + +You can also start the command line interface with the following command: +```shell +$ java -jar scalardb-sql-cli--all.jar --config +``` + +## Usage + +You can see the command line interface usage with the `-h` option as follows: + +```shell +$ java -jar scalardb-sql-cli--all.jar -h +Usage: java -jar scalardb-sql-cli--all.jar [-hs] -c=PROPERTIES_FILE + [-e=COMMAND] [-f=FILE] [-l=LOG_FILE] [-o=] +Starts ScalarDB SQL CLI. + -c, --config=PROPERTIES_FILE + A configuration file in properties format. + -e, --execute=COMMAND A command to execute. + -f, --file=FILE A script file to execute. + -h, --help Display this help message. + -l, --log=LOG_FILE A file to write output. + -o, --output-format= + Format mode for result display. You can specify + table/vertical/csv/tsv/xmlattrs/xmlelements/json/ans + iconsole. + -s, --silent Reduce the amount of informational messages displayed. +``` diff --git a/docs/3.12/scalardb-sql/configurations.md b/docs/3.12/scalardb-sql/configurations.md new file mode 100644 index 00000000..2eb02833 --- /dev/null +++ b/docs/3.12/scalardb-sql/configurations.md @@ -0,0 +1,171 @@ +# ScalarDB SQL Configurations + +This document explains the configurations of ScalarDB SQL. + +## Client side configurations + +The ScalarDB SQL client-side library offers two connection modes: *Direct* and *Server*. +With Direct mode, the library directly uses the ScalarDB API. +On the other hand, with Server mode, the library uses the ScalarDB API indirectly through [ScalarDB SQL Server](sql-server.md). + +ScalarDB SQL has Direct mode–specific configurations and Server mode–specific configurations. +The following sections explain the common configurations for the two connection modes, the Direct mode–specific configurations, and the Server mode–specific configurations. 
+ +### Common configurations + +The common configurations for the connection modes (Direct mode and Server mode) are as follows: + +| Name | Description | Default | +|------------------------------------------|----------------------------------------------------------------------------------------------------------|---------------| +| `scalar.db.sql.connection_mode` | Connection mode. `DIRECT` or `SERVER` can be set. | | +| `scalar.db.sql.default_transaction_mode` | Default transaction mode. `TRANSACTION` or `TWO_PHASE_COMMIT_TRANSACTION` can be set. | `TRANSACTION` | +| `scalar.db.sql.default_namespace_name` | Default namespace name. If you don't specify a namespace name in your SQL statement, this value is used. | | + +If you don't specify the connection mode and if you have only one dependency on the connection mode, the connection mode will be used. + +### Configurations for Direct mode + +The configurations for Direct mode are as follows: + +| Name | Description | Default | +|-----------------------------------------|--------------------------------------|---------| +| `scalar.db.sql.statement_cache.enabled` | Enable the statement cache. | `false` | +| `scalar.db.sql.statement_cache.size` | Maximum number of cached statements. | `100` | + +Note that in Direct mode, you need to configure the transaction manager, as well. +For details about configurations for the transaction manager, see [Transaction manager configurations](https://github.com/scalar-labs/scalardb/blob/master/docs/configurations.md#transaction-manager-configurations). + +In addition, for details about ScalarDB SQL Server, see [ScalarDB SQL Server](sql-server.md). + +### Configurations for Server mode + +The configurations for Server mode are as follows: + +| Name | Description | Default | +|-------------------------------------------------------|--------------------------------------------------------|------------------------| +| `scalar.db.sql.server_mode.host` | Host name for ScalarDB SQL Server to connect to. | `false` | +| `scalar.db.sql.server_mode.port` | Port number for ScalarDB SQL Server. | `60052` | +| `scalar.db.sql.server_mode.deadline_duration_millis` | Deadline duration (milliseconds) for gRPC connections. | `60000` (60 seconds) | +| `scalar.db.sql.server_mode.max_inbound_message_size` | Maximum message size allowed for a single gRPC frame. | The gRPC default value | +| `scalar.db.sql.server_mode.max_inbound_metadata_size` | Maximum size of metadata allowed to be received. | The gRPC default value | + +## ScalarDB SQL Server Configurations + +ScalarDB SQL Server is a gRPC server that implements the ScalarDB SQL interface. +This section explains the ScalarDB SQL Server configurations. + +In addition to the configurations described in [Transaction manager configurations](https://github.com/scalar-labs/scalardb/blob/master/docs/configurations.md#transaction-manager-configurations) and [Other configurations](https://github.com/scalar-labs/scalardb/blob/master/docs/configurations.md#other-configurations), the following configurations are available for ScalarDB SQL Server: + +| Name | Description | Default | +|--------------------------------------------------|--------------------------------------------------|------------------------| +| `scalar.db.sql.server.port` | Port number for the gRPC server. | `60052` | +| `scalar.db.sql.server.prometheus_exporter_port` | Port number for the Prometheus exporter. | `8080` | +| `scalar.db.sql.server.max_inbound_message_size` | Maximum message size allowed to be received. 
| The gRPC default value | +| `scalar.db.sql.server.max_inbound_metadata_size` | Maximum size of metadata allowed to be received. | The gRPC default value | + +For details about ScalarDB SQL Server, see [ScalarDB SQL Server](sql-server.md). + +## Configuration examples + +This section shows several configuration examples. + +### Example 1 + +``` +[App (ScalarDB SQL Library (Direct mode))] ---> [Underlying storage/database] +``` + +In this configuration, the app (ScalarDB SQL Library) connects to the underlying storage/database (in this case, Cassandra) directly. +Note that this configuration exists only for development purposes and is not recommended for production use. +This is because the app needs to implement the [scalar-admin](https://github.com/scalar-labs/scalar-admin) interface to take transactionally consistent backups for ScalarDB, which requires an extra burden for users. + +In this case, an example of configurations in the app is as follows: + +```properties +# +# ScalarDB SQL client configurations (Direct mode) +# + +# Connection mode. +scalar.db.sql.connection_mode=DIRECT + +# Enable the statement cache. +scalar.db.sql.statement_cache.enabled=true + +# Maximum number of cached statements. +scalar.db.sql.statement_cache.size=300 + +# +# Transaction manager configurations +# + +# Transaction manager implementation. +scalar.db.transaction_manager=consensus-commit + +# Storage implementation. +scalar.db.storage=cassandra + +# Comma-separated contact points. +scalar.db.contact_points= + +# Credential information to access the database. +scalar.db.username= +scalar.db.password= +``` + +### Example 2 + +``` +[App (ScalarDB SQL Library (Server mode))] ---> [ScalarDB SQL Server] ---> [Underlying storage/database] +``` + +In this configuration, the app (ScalarDB SQL Library) connects to an underlying storage/database through ScalarDB SQL Server. +This configuration is recommended for production use because ScalarDB SQL Server implements the [scalar-admin](https://github.com/scalar-labs/scalar-admin) interface, which enables you to take transactionally consistent backups for ScalarDB by pausing ScalarDB SQL Server. + +In this case, an example of configurations for the app is as follows: + +```properties +# +# ScalarDB SQL client configurations (Server mode) +# + +# Connection mode. +scalar.db.sql.connection_mode=SERVER + +# Host name for ScalarDB SQL Server. +scalar.db.sql.server_mode.host= + +# Port number for ScalarDB SQL Server. +scalar.db.sql.server_mode.port= +``` + +And an example of configurations for ScalarDB SQL Server is as follows: + +```properties +# +# ScalarDB SQL Server configurations +# + +# Enable the statement cache. +scalar.db.sql.statement_cache.enabled=true + +# Maximum number of cached statements. +scalar.db.sql.statement_cache.size=300 + +# +# Transaction manager configurations +# + +# Transaction manager implementation. +scalar.db.transaction_manager=consensus-commit + +# Storage implementation. +scalar.db.storage=cassandra + +# Comma-separated contact points. +scalar.db.contact_points= + +# Credential information to access the database. 
+scalar.db.username=
+scalar.db.password=
+```
diff --git a/docs/3.12/scalardb-sql/grammar.md b/docs/3.12/scalardb-sql/grammar.md
new file mode 100644
index 00000000..c30d998c
--- /dev/null
+++ b/docs/3.12/scalardb-sql/grammar.md
@@ -0,0 +1,2336 @@
+# ScalarDB SQL Grammar
+
+- DDL
+  - [CREATE NAMESPACE](#create-namespace)
+  - [CREATE TABLE](#create-table)
+  - [CREATE INDEX](#create-index)
+  - [TRUNCATE TABLE](#truncate-table)
+  - [DROP INDEX](#drop-index)
+  - [DROP TABLE](#drop-table)
+  - [DROP NAMESPACE](#drop-namespace)
+  - [CREATE COORDINATOR TABLES](#create-coordinator-tables)
+  - [TRUNCATE COORDINATOR TABLES](#truncate-coordinator-tables)
+  - [DROP COORDINATOR TABLES](#drop-coordinator-tables)
+  - [ALTER TABLE](#alter-table)
+- DML
+  - [SELECT](#select)
+  - [INSERT](#insert)
+  - [UPSERT](#upsert)
+  - [UPDATE](#update)
+  - [DELETE](#delete)
+- DCL
+  - [CREATE USER](#create-user)
+  - [ALTER USER](#alter-user)
+  - [DROP USER](#drop-user)
+  - [GRANT](#grant)
+  - [REVOKE](#revoke)
+- Others
+  - [USE](#use)
+  - [BEGIN](#begin)
+  - [START TRANSACTION](#start-transaction)
+  - [JOIN](#join)
+  - [PREPARE](#prepare)
+  - [VALIDATE](#validate)
+  - [COMMIT](#commit)
+  - [ROLLBACK](#rollback)
+  - [ABORT](#abort)
+  - [SET MODE](#set-mode)
+  - [SHOW TABLES](#show-tables)
+  - [DESCRIBE](#describe)
+  - [SUSPEND](#suspend)
+  - [RESUME](#resume)
+
+## DDL
+
+### CREATE NAMESPACE
+
+Before creating tables, namespaces must be created since a table belongs to one namespace.
+The `CREATE NAMESPACE` command creates a namespace.
+
+#### Grammar
+
+```sql
+CREATE NAMESPACE [IF NOT EXISTS] [WITH creation_options]
+
+creation_options: