diff --git a/.github/vale/dicts/aiven.dic b/.github/vale/dicts/aiven.dic index 5c8d0c9f5f..d0897fad6d 100644 --- a/.github/vale/dicts/aiven.dic +++ b/.github/vale/dicts/aiven.dic @@ -10,9 +10,12 @@ Apache API APIs ARN +ARNs Astacus Auth0 autojoin +autoscaler +autoscaling autovacuum Avro AZ @@ -69,9 +72,11 @@ Docker dockerized downsampled downsampling +Dragonfly DSBulk DZone Elasticsearch +enablement Epicurious etcd eval @@ -160,6 +165,7 @@ MongoDB MySQL myhoard namespace/MS +netmask NodeJS nosqlbench npm @@ -208,6 +214,7 @@ rebalance rebalances rebalancing Redis +Redis OSS redis_timeout Redli refcard @@ -228,6 +235,7 @@ Savepoints savepoints schemaless scriptable +SDKs? Security serde serializer @@ -275,6 +283,7 @@ UDFs unassign uncomment unencrypted +unfollow United States Unschedule untrusted diff --git a/.github/vale/styles/Aiven/capitalization_headings.yml b/.github/vale/styles/Aiven/capitalization_headings.yml index de9c444323..8fdf5d8f5e 100644 --- a/.github/vale/styles/Aiven/capitalization_headings.yml +++ b/.github/vale/styles/Aiven/capitalization_headings.yml @@ -26,6 +26,7 @@ exceptions: - Business - BYOC - Cassandra + - CLI - ClickHouse - CloudWatch - Cloud Logging diff --git a/.github/vale/styles/Aiven/first_Dragonfly_is_registered.yml b/.github/vale/styles/Aiven/first_Dragonfly_is_registered.yml new file mode 100644 index 0000000000..d638e5e7ba --- /dev/null +++ b/.github/vale/styles/Aiven/first_Dragonfly_is_registered.yml @@ -0,0 +1,8 @@ +extends: conditional +message: "At least one '%s' must be marked as ®" +level: error +scope: text +ignorecase: false + +first: '\b(Dragonfly)(?!®)' +second: '(Dragonfly)(?:®)' diff --git a/.github/workflows/advanced-params-dragonfly.yaml b/.github/workflows/advanced-params-dragonfly.yaml new file mode 100644 index 0000000000..07db5a4f81 --- /dev/null +++ b/.github/workflows/advanced-params-dragonfly.yaml @@ -0,0 +1,39 @@ +name: Dragonfly - Create PR to Update Advanced parameters + +on: + schedule: + - cron: "0 6 * * 2" + workflow_dispatch: + +jobs: + advanced_params_dragonfly: + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@v2 + + - name: Set up Python 3.8 + uses: actions/setup-python@v2 + with: + python-version: "3.8" + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + + - name: Dragonfly - Update Advanced parameters + run: make service-type-config-dragonfly + + - name: Create Pull Request + uses: peter-evans/create-pull-request@v4 + with: + commit-message: Dragonfly - Update Advanced parameters + committer: GitHub + author: GitHub + title: Dragonfly - Update Advanced parameters + body: Dragonfly - update advanced parameters file + base: main + branch: dragonfly-update-advanced-params + labels: dragonfly, automated-pr + delete-branch: true diff --git a/.github/workflows/cloud-list.yaml b/.github/workflows/cloud-list.yaml index cf15b180e6..1925de8223 100644 --- a/.github/workflows/cloud-list.yaml +++ b/.github/workflows/cloud-list.yaml @@ -2,7 +2,7 @@ name: Cloud - Create PR to update available list on: schedule: - - cron: "0 6 * * 2" + - cron: "0 6 * * *" workflow_dispatch: jobs: @@ -12,10 +12,10 @@ jobs: - name: Checkout the repo uses: actions/checkout@v2 - - name: Set up Python 3.8 - uses: actions/setup-python@v2 + - name: Set up Python 3.11 + uses: actions/setup-python@v4 with: - python-version: "3.8" + python-version: "3.11" - name: Install dependencies run: | diff --git a/.github/workflows/linkcheck-changed-files.yaml 
b/.github/workflows/linkcheck-changed-files.yaml index 9f3f095c7f..c006d5bda9 100644 --- a/.github/workflows/linkcheck-changed-files.yaml +++ b/.github/workflows/linkcheck-changed-files.yaml @@ -17,7 +17,7 @@ jobs: - name: Get changed files id: changed-files - uses: tj-actions/changed-files@v24 + uses: tj-actions/changed-files@v41 - name: Set up Python uses: actions/setup-python@v4 diff --git a/1 b/1 deleted file mode 100644 index 48cf41b313..0000000000 --- a/1 +++ /dev/null @@ -1,23 +0,0 @@ -change links - -# Please enter the commit message for your changes. Lines starting -# with '#' will be ignored, and an empty message aborts the commit. -# -# Date: Wed Aug 30 10:47:53 2023 +0200 -# -# On branch redirect-cookbook -# Your branch is up to date with 'origin/redirect-cookbook'. -# -# Changes to be committed: -# modified: docs/products/cassandra/concepts/cross-cluster-replication.rst -# modified: docs/products/cassandra/howto/disable-cross-cluster-replication.rst -# modified: docs/products/cassandra/howto/enable-cross-cluster-replication.rst -# modified: docs/products/cassandra/howto/manage-cross-cluster-replication.rst -# modified: docs/products/clickhouse/howto.rst -# modified: docs/products/clickhouse/howto/list-integrations.rst -# modified: docs/products/kafka/karapace/getting-started.rst -# modified: docs/products/kafka/karapace/howto/enable-karapace.rst -# modified: docs/tools/terraform.rst -# modified: docs/tools/terraform/get-started.rst -# modified: docs/tools/terraform/reference.rst -# diff --git a/Makefile b/Makefile index fba3304903..bc30fd52ab 100644 --- a/Makefile +++ b/Makefile @@ -105,6 +105,9 @@ service-type-config-redis: service-type-config-influxdb: python "$(SOURCEDIR)/scripts/aiven/service_type_config.py" "influxdb" "includes/config-influxdb.rst" +service-type-config-dragonfly: + python "$(SOURCEDIR)/scripts/aiven/service_type_config.py" "dragonfly" "includes/config-dragonfly.rst" + # TODO: add automation for "pg".
See https://github.com/aiven/devportal/issues/1026 # (Re)Generate cloud listing diff --git a/_redirects b/_redirects index 08564efddf..38d32d89ae 100644 --- a/_redirects +++ b/_redirects @@ -13,6 +13,7 @@ /api /docs/tools/api /cli /docs/tools/cli.html /terraform /docs/tools/terraform +/community https://aiven.io/community/ # Renamed/deleted files /docs/products/flink/howto/real-time-alerting-solution-cli.html /docs/products/flink/howto/real-time-alerting-solution.html @@ -56,9 +57,16 @@ /docs/products/clickhouse/concepts/databases-and-tables.html /docs/products/clickhouse/howto/manage-databases-tables.html /docs/products/clickhouse/howto/integrate-pg.html /docs/products/clickhouse/howto/integrate-postgresql.html /docs/platform/concepts/byoa.html /docs/platform/concepts/byoc.html -/docs/products/opensearch/howto/list-upgrade /docs/products/opensearch/howto +/docs/products/opensearch/howto/list-upgrade /docs/products/opensearch/howto /docs/products/opensearch/howto/upgrade-to-opensearch /docs/products/opensearch/concepts/opensearch-vs-elasticsearch /docs/tutorials /docs/integrations +/docs/platform/howto/list-support /docs/platform/howto/project-support-center +/docs/platform/howto/change-support-tier /docs/platform/howto/project-support-center +/docs/platform/concepts/service-level-agreement /docs/platform/howto/project-support-center +/docs/products/postgresql/reference/list-of-advanced-params /docs/products/postgresql/reference/advanced-params +/docs/products/kafka/reference/kstream-data-write-issue https://aiven.io/changelog#06-12-2023 +/docs/tools/api/examples /docs/tools/api + # Moved to https://aiven.io/developer /docs/tools/terraform/reference/cookbook https://aiven.io/developer/terraform @@ -73,13 +81,31 @@ /docs/tools/terraform/reference/cookbook/kafka-mongodb-recipe https://aiven.io/developer/apache-kafka-with-mongodb /docs/tools/terraform/reference/cookbook/kafka-debezium-postgres-source https://aiven.io/developer/debezium-source-postgresql-kafka-across-clouds /docs/tools/terraform/reference/cookbook/kafka-topics-http-connector-recipe https://aiven.io/developer/kafka-with-http-sink -/docs/tools/terraform/reference/cookbook/kafka-custom-conf-recipe https://aiven.io/developer/apache-kafka-with-custom-configurations +/docs/tools/terraform/reference/cookbook/kafka-custom-conf-recipe https://aiven.io/developer/apache-kafka-with-custom-configurations /docs/tools/terraform/reference/cookbook/m3db-m3agg-recipe https://aiven.io/developer/m3-aggregator-integration /docs/tools/terraform/reference/cookbook/postgresql-read-replica-recipe https://aiven.io/developer/postgresql-read-only-terraform /docs/tools/terraform/reference/cookbook/clickhouse-access-setup-recipe https://aiven.io/developer/manage-user-privileges-clickhouse-terraform /docs/products/clickhouse/howto/configure-access-terraform-deployed https://aiven.io/developer/manage-user-privileges-clickhouse-terraform /docs/tools/terraform/reference/cookbook/kafka-clickhouse-integration-recipe https://aiven.io/developer/kafka-source-for-clickhouse -/docs/tools/terraform/reference/cookbook/postgres-clickhouse-integration-recipe https://aiven.io/developer/postgresql-source-for-clickhouse +/docs/tools/terraform/reference/cookbook/postgres-clickhouse-integration-recipe https://aiven.io/developer/postgresql-source-for-clickhouse +/docs/community/challenge/catch-the-bus https://aiven.io/community/ +/docs/community/challenge/the-rolling-challenge https://aiven.io/community/ +/docs/tools/cli/account/account-authentication-method 
/docs/tools/cli/account +/docs/tools/cli/card /docs/tools/cli/account + + +/docs/tools/api/examples /docs/tools/api +/docs/products/postgresql/getting-started /docs/products/postgresql/get-started +/docs/products/m3db/getting-started /docs/products/m3db/get-started +/docs/products/flink/getting-started /docs/products/flink/get-started +/docs/products/kafka/getting-started /docs/products/kafka/get-started +/docs/products/clickhouse/getting-started /docs/products/clickhouse/get-started +/docs/products/opensearch/getting-started /docs/products/opensearch/get-started +/docs/products/kafka/karapace/getting-started /docs/products/kafka/karapace/get-started +/docs/products/kafka/kafka-connect/getting-started /docs/products/kafka/kafka-connect/get-started +/docs/products/opensearch/dashboards/getting-started /docs/products/opensearch/dashboards/get-started +/docs/products/kafka/kafka-mirrormaker/getting-started /docs/products/kafka/kafka-mirrormaker/get-started + # Redirect from .index.html to specific page names for landing diff --git a/_static/images/favicon.ico b/_static/images/favicon.ico index da4d70c94c..5bc7197aa1 100644 Binary files a/_static/images/favicon.ico and b/_static/images/favicon.ico differ diff --git a/_templates/base.html b/_templates/base.html index b1a8183116..bf370b74f9 100644 --- a/_templates/base.html +++ b/_templates/base.html @@ -5,6 +5,9 @@ + + + diff --git a/_toc.yml b/_toc.yml index ad3f40e019..f7400ad5f0 100644 --- a/_toc.yml +++ b/_toc.yml @@ -18,6 +18,7 @@ entries: - file: docs/platform/howto/billing-google-cloud-platform-marketplace-subscription title: Set up Google Cloud Marketplace subscriptions - file: docs/tools/aiven-console + - file: docs/platform/concepts/beta_services - file: docs/platform/howto/feature-preview # -------- ORGANIZATIONS, UNITS, PROJECTS -------- @@ -25,6 +26,7 @@ entries: entries: - file: docs/platform/concepts/projects_accounts_access - file: docs/tools/aiven-console/howto/create-accounts + - file: docs/platform/howto/manage-organizations - file: docs/platform/howto/manage-project - file: docs/platform/reference/project-member-privileges - file: docs/platform/howto/manage-unassigned-projects @@ -68,6 +70,9 @@ entries: title: User and access management entries: - file: docs/platform/howto/manage-org-users + title: Invite and remove organization users + - file: docs/platform/concepts/managed-users + - file: docs/platform/howto/manage-domains - file: docs/platform/howto/delete-user - file: docs/platform/howto/make-super-admin - file: docs/platform/howto/list-user-profile @@ -79,6 +84,8 @@ entries: - file: docs/platform/howto/add-authentication-method - file: docs/platform/reference/password-policy - file: docs/platform/howto/user-2fa + - file: docs/platform/howto/set-authentication-policies + title: Set authentication policies - file: docs/platform/concepts/authentication-tokens - file: docs/platform/howto/create_authentication_token - file: docs/platform/howto/list-saml @@ -124,6 +131,8 @@ entries: - file: docs/platform/howto/prepare-for-high-load - file: docs/platform/howto/scale-services - file: docs/platform/concepts/dynamic-disk-sizing + - file: docs/platform/howto/disk-autoscaler + title: Disk autoscaling - file: docs/platform/howto/cleanup-powered-off-services - file: docs/platform/howto/add-storage-space - file: docs/platform/howto/access-service-logs @@ -140,17 +149,22 @@ entries: - file: docs/platform/reference/list_of_clouds - file: docs/platform/concepts/availability-zones - file: docs/platform/howto/list-byoc - title: BYOC + 
title: Bring your own cloud (BYOC) entries: - file: docs/platform/concepts/byoc + title: About BYOC + - file: docs/platform/howto/byoc/enable-byoc + title: Enable BYOC - file: docs/platform/howto/byoc/create-custom-cloud title: Create custom cloud - file: docs/platform/howto/byoc/assign-project-custom-cloud - title: Assign projects + title: Attach projects - file: docs/platform/howto/byoc/add-customer-info-custom-cloud title: Add customer contacts - file: docs/platform/howto/byoc/rename-custom-cloud title: Rename custom cloud + - file: docs/platform/howto/byoc/delete-custom-cloud + title: Delete custom cloud - file: docs/platform/concepts/enhanced-compliance-env - file: docs/platform/concepts/aiven-node-firewall-configuration - file: docs/platform/concepts/cloud-security @@ -235,7 +249,6 @@ entries: entries: - glob: docs/tools/cli/account/* - file: docs/tools/cli/billing-group - - file: docs/tools/cli/card - file: docs/tools/cli/cloud - file: docs/tools/cli/credits - file: docs/tools/cli/events @@ -253,8 +266,6 @@ entries: # -------- API -------- - file: docs/tools/api title: Aiven API - entries: - - file: docs/tools/api/examples # -------- TERRAFORM -------- - file: docs/tools/terraform @@ -302,11 +313,12 @@ entries: - file: docs/products/kafka title: Apache Kafka entries: - - file: docs/products/kafka/getting-started + - file: docs/products/kafka/get-started title: Get started - file: docs/products/kafka/howto/fake-sample-data title: Sample data generator - file: docs/products/kafka/concepts + title: Concepts entries: - file: docs/products/kafka/concepts/upgrade-procedure title: Upgrade procedure @@ -404,6 +416,7 @@ entries: - file: docs/products/kafka/howto/kafka-streams-with-aiven-for-kafka - file: docs/products/kafka/howto/flink-with-aiven-for-kafka - file: docs/products/kafka/howto/datadog-customised-metrics + - file: docs/products/kafka/howto/kafka-prometheus-privatelink - file: docs/products/kafka/howto/ksql-docker title: Use ksqlDB with Aiven for Apache Kafka - file: docs/products/kafka/howto/add-missing-producer-consumer-metrics @@ -433,13 +446,11 @@ entries: title: Advanced parameters - file: docs/products/kafka/reference/kafka-metrics-prometheus title: Metrics available via Prometheus - - file: docs/products/kafka/reference/kstream-data-write-issue - title: Resolving Data Write Issues with Kafka Streams 3.6.0 - file: docs/products/kafka/kafka-connect title: Apache Kafka Connect entries: - - file: docs/products/kafka/kafka-connect/getting-started + - file: docs/products/kafka/kafka-connect/get-started - file: docs/products/kafka/kafka-connect/concepts entries: - file: docs/products/kafka/kafka-connect/concepts/list-of-connector-plugins @@ -550,7 +561,7 @@ entries: - file: docs/products/kafka/kafka-mirrormaker title: Apache Kafka MirrorMaker2 entries: - - file: docs/products/kafka/kafka-mirrormaker/getting-started + - file: docs/products/kafka/kafka-mirrormaker/get-started - file: docs/products/kafka/kafka-mirrormaker/concepts entries: - file: docs/products/kafka/kafka-mirrormaker/concepts/disaster-recovery-migration @@ -577,7 +588,7 @@ entries: - file: docs/products/kafka/karapace title: Karapace entries: - - file: docs/products/kafka/karapace/getting-started + - file: docs/products/kafka/karapace/get-started - file: docs/products/kafka/karapace/concepts title: Concepts entries: @@ -611,7 +622,7 @@ entries: title: Plans and pricing - file: docs/products/flink/reference/flink-limitations title: Limitations - - file: docs/products/flink/getting-started + - file: 
docs/products/flink/get-started title: Quickstart - file: docs/products/flink/concepts title: Concepts @@ -645,11 +656,13 @@ entries: title: Integrate with Apache Kafka - file: docs/products/flink/howto/connect-bigquery title: Integrate with Google BigQuery - - file: docs/products/flink/howto/list-flink-applications - title: Aiven for Apache Flink applications + - file: docs/products/flink/howto/create-flink-applications + title: Apache Flink applications entries: - - file: docs/products/flink/howto/create-flink-applications - title: Create Apache Flink applications + - file: docs/products/flink/howto/create-sql-application + title: Create an SQL application + - file: docs/products/flink/howto/create-jar-application + title: Create a JAR application - file: docs/products/flink/howto/manage-flink-applications title: Manage Apache Flink applications - file: docs/products/flink/howto/list-flink-tables @@ -741,6 +754,8 @@ entries: title: Advanced parameters - file: docs/products/cassandra/reference/cassandra-metrics-prometheus title: Metrics via Prometheus + - file: docs/products/cassandra/reference/cassandra-metrics-datadog + title: Metrics via Datadog # -------- CLICKHOUSE -------- - file: docs/products/clickhouse @@ -757,7 +772,7 @@ entries: title: Plans and pricing - file: docs/products/clickhouse/reference/limitations title: Limits and limitations - - file: docs/products/clickhouse/getting-started + - file: docs/products/clickhouse/get-started title: Quickstart - file: docs/products/clickhouse/concepts title: Concepts @@ -852,8 +867,14 @@ entries: entries: - file: docs/products/clickhouse/reference/supported-table-engines title: Table engines + - file: docs/products/clickhouse/reference/supported-interfaces-drivers + title: Interfaces and drivers - file: docs/products/clickhouse/reference/metrics-list - title: ClickHouse metrics in Grafana + title: Metrics in Grafana + - file: docs/products/clickhouse/reference/clickhouse-metrics-datadog + title: Metrics via Datadog + - file: docs/products/clickhouse/reference/clickhouse-metrics-prometheus + title: Metrics via Prometheus - file: docs/products/clickhouse/reference/supported-table-functions title: Table functions - file: docs/products/clickhouse/reference/s3-supported-file-formats @@ -862,8 +883,48 @@ entries: title: Formats for ClickHouse-Kafka data exchange - file: docs/products/clickhouse/reference/advanced-params title: Advanced parameters + +# ---------Dragonfly -------------- + + - file: docs/products/dragonfly + title: Dragonfly + entries: + - file: docs/products/dragonfly/concepts/overview + title: Overview + - file: docs/products/dragonfly/get-started + title: Quickstart + - file: docs/products/dragonfly/concepts + title: Concepts + entries: + - file: docs/products/dragonfly/concepts/ha-dragonfly + - file: docs/products/dragonfly/howto + title: HowTo + entries: + - file: docs/products/dragonfly/howto/list-code-samples + title: Connect to service + entries: + - file: docs/products/dragonfly/howto/connect-redis-cli + - file: docs/products/dragonfly/howto/connect-go + - file: docs/products/dragonfly/howto/connect-node + - file: docs/products/dragonfly/howto/connect-python + + - file: docs/products/dragonfly/howto/list-migrate-data + title: Data migration + entries: + - file: docs/products/dragonfly/howto/migrate-aiven-redis-df-console + title: Migrate Aiven for Redis + - file: docs/products/dragonfly/howto/migrate-ext-redis-df-console + title: Migrate external Redis + + - file: docs/products/dragonfly/reference + title: Reference + 
entries: + - file: docs/products/dragonfly/reference/advanced-params + title: Advanced parameters + # -------- GRAFANA -------- + - file: docs/products/grafana title: Grafana entries: @@ -931,7 +992,7 @@ entries: - file: docs/products/m3db title: M3DB entries: - - file: docs/products/m3db/getting-started + - file: docs/products/m3db/get-started title: Get started - file: docs/products/m3db/concepts title: Concepts @@ -1029,7 +1090,7 @@ entries: - file: docs/products/opensearch title: OpenSearch entries: - - file: docs/products/opensearch/getting-started + - file: docs/products/opensearch/get-started title: Quickstart entries: - file: docs/products/opensearch/howto/sample-dataset @@ -1128,7 +1189,7 @@ entries: - file: docs/products/opensearch/dashboards title: OpenSearch Dashboards entries: - - file: docs/products/opensearch/dashboards/getting-started + - file: docs/products/opensearch/dashboards/get-started - file: docs/products/opensearch/dashboards/howto title: HowTo entries: @@ -1155,7 +1216,7 @@ entries: entries: - file: docs/products/postgresql/overview title: Overview - - file: docs/products/postgresql/getting-started + - file: docs/products/postgresql/get-started title: Quickstart - file: docs/products/postgresql/concepts title: Concepts @@ -1224,6 +1285,7 @@ entries: - file: docs/products/postgresql/howto/use-pgvector title: Enable and use pgvector - file: docs/products/postgresql/howto/pg-object-size + - file: docs/products/postgresql/howto/readonly-user - file: docs/products/postgresql/howto/list-replication-migration title: Migrate entries: @@ -1263,7 +1325,7 @@ entries: - file: docs/products/postgresql/reference title: Reference entries: - - file: docs/products/postgresql/reference/list-of-advanced-params + - file: docs/products/postgresql/reference/advanced-params title: Advanced parameters - file: docs/products/postgresql/reference/pg-connection-limits title: Connection limits per plan @@ -1337,20 +1399,6 @@ entries: title: Troubleshoot connection issues # -------- SUPPORT -------- - - file: docs/platform/howto/list-support + - file: docs/platform/howto/project-support-center title: Support - entries: - - file: docs/platform/howto/project-support-center - - file: docs/platform/howto/change-support-tier - title: Upgrade your support tier - - file: docs/platform/concepts/service-level-agreement - - file: docs/platform/concepts/beta_services - # -------- COMMUNITY -------- - - file: docs/community - title: Community - entries: - - file: docs/community/challenge/catch-the-bus - title: Catch the Bus - Aiven challenge with ClickHouse - - file: docs/community/challenge/the-rolling-challenge - title: Rolling - Aiven challenge with Apache Kafka and Apache Flink diff --git a/code/products/flink/slack_notification.md b/code/products/flink/slack_notification.md index c4b29028ab..5393abc50d 100644 --- a/code/products/flink/slack_notification.md +++ b/code/products/flink/slack_notification.md @@ -1,6 +1,6 @@ INSERT INTO SLACK_SINK SELECT - '', + '$CHANNEL_ID', 'host:' || CPU.hostname || ' CPU: ' || cpu || ' avg CPU value:' || TRY_CAST(usage_avg as string) || diff --git a/code/products/flink/slack_sink.md b/code/products/flink/slack_sink.md index bb3dc04d7b..ffa0130e71 100644 --- a/code/products/flink/slack_sink.md +++ b/code/products/flink/slack_sink.md @@ -3,5 +3,5 @@ create table SLACK_SINK ( message STRING ) WITH ( 'connector' = 'slack', - 'token' = '' + 'token' = '$SLACK_TOKEN' ) \ No newline at end of file diff --git a/code/products/mysql/connect.php b/code/products/mysql/connect.php 
index a2497c4db0..eb07dc51f1 100644 --- a/code/products/mysql/connect.php +++ b/code/products/mysql/connect.php @@ -9,7 +9,7 @@ $conn .= "host=" . $fields["host"]; $conn .= ";port=" . $fields["port"];; $conn .= ";dbname=defaultdb"; -$conn .= ";sslmode=verify-ca;sslrootcert=ca.pem"; +$conn .= ";sslmode=verify-ca;sslrootcert='D:/absolute/path/to/ssl/certs/ca.pem'"; try { $db = new PDO($conn, $fields["user"], $fields["pass"]); diff --git a/conf.py b/conf.py index 7a707298e6..fb70555929 100644 --- a/conf.py +++ b/conf.py @@ -240,79 +240,62 @@ # -- Replacements ----------------------------------------------------------- rst_epilog = """ -.. |icon-challenge-trophy| image:: /images/community/challenge-trophy.svg - :width: 24px - :class: no-scaled-link -.. |icon-twitter| image:: /images/social_media/icon-twitter.svg +.. |icon-postgres| image:: /images/icons/icon-pg.svg :width: 24px :class: no-scaled-link -.. |icon-github| image:: /images/social_media/icon-github.svg +.. |icon-mysql| image:: /images/icons/icon-mysql.svg :width: 24px :class: no-scaled-link -.. |icon-blog| image:: /images/social_media/icon-blog.svg +.. |icon-kafka| image:: /images/icons/icon-kafka.svg :width: 24px :class: no-scaled-link -.. |icon-youtube| image:: /images/social_media/icon-youtube.svg +.. |icon-kafka-connect| image:: /images/icons/icon-kafka-connect.svg :width: 24px :class: no-scaled-link -.. |icon-postgres| image:: /images/icon-pg.svg +.. |icon-kafka-mirrormaker| image:: /images/icons/icon-kafka-mirrormaker.svg :width: 24px :class: no-scaled-link -.. |icon-mysql| image:: /images/icon-mysql.svg +.. |icon-m3db| image:: /images/icons/icon-m3db.svg :width: 24px :class: no-scaled-link -.. |icon-kafka| image:: /images/icon-kafka.svg +.. |icon-influxdb| image:: /images/icons/icon-influxdb.svg :width: 24px :class: no-scaled-link -.. |icon-kafka-connect| image:: /images/icon-kafka-connect.svg +.. |icon-opensearch| image:: /images/icons/icon-opensearch.png :width: 24px :class: no-scaled-link -.. |icon-kafka-mirrormaker| image:: /images/icon-kafka-mirrormaker.svg +.. |icon-cassandra| image:: /images/icons/icon-cassandra.svg :width: 24px :class: no-scaled-link -.. |icon-m3db| image:: /images/icon-m3db.svg +.. |icon-redis| image:: /images/icons/icon-redis.svg :width: 24px :class: no-scaled-link -.. |icon-influxdb| image:: /images/icon-influxdb.svg +.. |icon-grafana| image:: /images/icons/icon-grafana.svg :width: 24px :class: no-scaled-link -.. |icon-opensearch| image:: /images/icon-opensearch.png +.. |icon-flink| image:: /images/icons/icon-flink.svg :width: 24px :class: no-scaled-link -.. |icon-cassandra| image:: /images/icon-cassandra.svg +.. |icon-clickhouse| image:: /images/icons/icon-clickhouse.svg :width: 24px :class: no-scaled-link -.. |icon-redis| image:: /images/icon-redis.svg +.. |icon-dragonfly| image:: /images/icons/icon-dragonfly.svg :width: 24px - :class: no-scaled-link - -.. |icon-grafana| image:: /images/icon-grafana.svg - :width: 24px - :class: no-scaled-link - -.. |icon-flink| image:: /images/icon-flink.svg - :width: 24px - :class: no-scaled-link - -.. |icon-clickhouse| image:: /images/icon-clickhouse.svg - :width: 24px - :class: no-scaled-link - -.. |beta| replace:: :bdg-secondary:`beta` + :class: no-scaled-link .. 
|preview| replace:: :bdg-secondary:`preview` diff --git a/docs/community.rst b/docs/community.rst deleted file mode 100644 index fc5045921f..0000000000 --- a/docs/community.rst +++ /dev/null @@ -1,80 +0,0 @@ -Aiven community -=============== - -There are lots of other ways to learn more about Aiven, and interact with us. - - -.. grid:: 1 2 2 2 - - .. grid-item-card:: - :shadow: md - :margin: 2 2 0 0 - - |icon-twitter| **Twitter** We love to chat! Tell us what you're building with Aiven! - - .. button-link:: https://twitter.com/aiven_io - :align: right - :color: primary - :outline: - - To the Twitterverse - - .. grid-item-card:: - :shadow: md - :margin: 2 2 0 0 - - |icon-blog| **Blog** To read tech news, tutorials, and updates on what we're up to. - - - .. button-link:: https://aiven.io/blog - :align: right - :color: primary - :outline: - - To the blog - - .. grid-item-card:: - :shadow: md - :margin: 2 2 0 0 - - |icon-youtube| **YouTube** If video is your thing, cool, it's ours too. - - .. button-link:: https://www.youtube.com/c/Aiven_io - :align: right - :color: primary - :outline: - - To YouTube - - .. grid-item-card:: - :shadow: md - :margin: 2 2 0 0 - - |icon-github| **GitHub** Find our public repositories on GitHub, work on `open source `_ with us! - - - .. button-link:: https://github.com/aiven - :align: right - :color: primary - :outline: - - To the code - - - -Check out the `Aiven Console `_ for your Aiven web interface. New accounts get a free trial! - ----------------- - -Documentation contributors --------------------------- - -We love our documentation so much that we created documentation about the documentation! If you want to learn more about the platform, or contribute some changes of your own, everything you need is in the :doc:`/docs/community/documentation` section. - -Mini challenges ---------------- - -Get to know Aiven through our mini challenges, each introducing you to different open source technologies. - -* :doc:`Catch the bus (Clickhouse) ` -* :doc:`Rolling challenge (Apache Kafka & Flink) ` \ No newline at end of file diff --git a/docs/community/challenge/catch-the-bus.rst b/docs/community/challenge/catch-the-bus.rst deleted file mode 100644 index 115be50c0b..0000000000 --- a/docs/community/challenge/catch-the-bus.rst +++ /dev/null @@ -1,26 +0,0 @@ -Catch the bus - Aiven challenge with ClickHouse® -================================================ - -Welcome to Aiven's "Catch the bus" challenge, an easy way for you to explore Aiven for ClickHouse®. - -With the launch of Aiven for ClickHouse® we finally have the "C" for our KFC stack, the best combination of tools for real-time alerting and historic analytics. The stack utilises Apache Kafka®, Apache Flink®, and ClickHouse® to build up a robust, scalable architecture for getting the most from your data, whether it be batched ETL or real-time sensors. - -For this challenge, we'll be using real-time data from `Digitransit `_ to plot maps and find new insights. Digitransit provides real-time bus information and positioning data for busses in Helsinki. We've saved you a few weeks worth of data as a CSV to experiment with. - -Let's dive right in. - -Instructions ------------- -The goal is to find the best bus routes between the different KFC restaurants in Helsinki. - -1. Create an Aiven free trial account: `sign up for free `_. - -2. `Download the dataset `_. - -3. Spin up an Aiven for ClickHouse® service and load the dataset. Please see the `Aiven for ClickHouse® docs `_ for help with these steps. - -4. 
Find the routes and plot them on a map. Here's a `notebook `_ with a few optional queries and a mapping library to get you started. You can also check out the `ClickHouse docs `_ for advanced SQL information. - -5. If you find the solution or need help, email us at `hacks@Aiven.io `_. - -Good luck! \ No newline at end of file diff --git a/docs/community/challenge/the-rolling-challenge.rst b/docs/community/challenge/the-rolling-challenge.rst deleted file mode 100644 index c97dc30b8f..0000000000 --- a/docs/community/challenge/the-rolling-challenge.rst +++ /dev/null @@ -1,118 +0,0 @@ -Aiven "rolling challenge" with Apache Kafka® and Apache Flink® -============================================================== - -Welcome to Aiven's "Rolling" challenge, an easy way for you to explore Aiven for Apache Kafka® and Aiven for Apache Flink®. - -With the launch of Aiven for Apache Flink® we added a new way to manipulate your Apache Kafka® streaming data via SQL statements, providing the best combination of tools for real-time data transformation. - -For this challenge, we'll be using `Aiven fake data generator on Docker `_ to generate a series of symbols. The challenge consists of understanding the overall meaning of the symbols by transforming the original series of data with Apache Flink. - -Let's dive right in. - - -Instructions ------------- - -The goal is to make sense of the incoming stream of data. - - -1. Create an Aiven free trial account: `sign up for free `_. - -2. Create an :doc:`Aiven for Apache Kafka® ` and :doc:`Aiven for Apache Flink® ` service - -3. Set up an :doc:`integration between the Aiven for Apache Kafka® and Apache Flink® services ` - -4. Create a new :doc:`Aiven authentication token ` - -5. Clone the `Aiven fake data generator on Docker `_ with: - - .. code:: - - git clone https://github.com/aiven/fake-data-producer-for-apache-kafka-docker - -6. Copy the file ``conf/env.conf.sample`` to ``conf/env.conf`` and edit the following parameters - - +----------------+------------------------------------------------------------------------------------------------------------------------------+ - | Parameter Name | Parameter Value | - +================+==============================================================================================================================+ - |PROJECT_NAME | Name of the Aiven Project where the Aiven for Apache Kafka service is running | - +----------------+------------------------------------------------------------------------------------------------------------------------------+ - |SERVICE_NAME | Name of the Aiven for Apache Kafka service running | - +----------------+------------------------------------------------------------------------------------------------------------------------------+ - |TOPIC | Name of the Topic to write messages in. 
``rolling`` for the challenge | - +----------------+------------------------------------------------------------------------------------------------------------------------------+ - |PARTITIONS | 5 | - +----------------+------------------------------------------------------------------------------------------------------------------------------+ - |REPLICATION | 2 | - +----------------+------------------------------------------------------------------------------------------------------------------------------+ - |NR_MESSAGES | 0 | - +----------------+------------------------------------------------------------------------------------------------------------------------------+ - |MAX_TIME | 0 | - +----------------+------------------------------------------------------------------------------------------------------------------------------+ - |SUBJECT | ``rolling`` | - +----------------+------------------------------------------------------------------------------------------------------------------------------+ - |USERNAME | Your Aiven account username | - +----------------+------------------------------------------------------------------------------------------------------------------------------+ - |TOKEN | Your Aiven account token | - +----------------+------------------------------------------------------------------------------------------------------------------------------+ - |PRIVATELINK | ``NO`` | - +----------------+------------------------------------------------------------------------------------------------------------------------------+ - |SECURITY | ``SSL`` | - +----------------+------------------------------------------------------------------------------------------------------------------------------+ - -7. Build the Docker image - - .. code:: - - docker build -t fake-data-producer-for-apache-kafka-docker . - -8. Run the Docker image - - .. code:: - - docker run fake-data-producer-for-apache-kafka-docker - -9. Check the fake messages being produced by Docker - -10. In the `Aiven Console `_, navigate to the Aiven for Apache Flink service page - -11. Play with the Aiven for Apache Flink **Application** tab and try to make sense of the data. - - .. Tip:: - - The source table can be mapped in Aiven for Apache Flink with the following SQL, using the ``rolling`` topic as source: - - .. code:: - - - CREATE TABLE ROLLING_IN( - ts BIGINT, - val string, - ts_ltz AS TO_TIMESTAMP_LTZ(ts, 3), - WATERMARK FOR ts_ltz AS ts_ltz - INTERVAL '10' SECOND - ) - WITH ( - 'connector' = 'kafka', - 'properties.bootstrap.servers' = '', - 'topic' = 'rolling', - 'value.format' = 'json', - 'scan.startup.mode' = 'earliest-offset' - ) - - -12. 
When you find the solution, email a screenshot to hacks@Aiven.io - -Tips ----- - -Some tips that could help in solving the challenge: - -* ``kcat`` is a tool to explore data in Apache Kafka topics, check the :doc:`dedicate documentation ` to understand how to use it with Aiven for Apache Kafka -* ``jq`` is a helpful tool to parse JSON payloads, read `the instructions `_ on how to install and check the following useful flags: - * ``-r`` retrieves the raw output - * ``-j`` doesn't create a new line for every message - * ``-c`` shows data in compact view - -* If you're stuck with visualizing ``kcat`` consumer data with ``jq``, check the `-u` flag `as per dedicated example `_ - - diff --git a/docs/community/documentation/tips-tricks/renaming-files.rst b/docs/community/documentation/tips-tricks/renaming-files.rst index 6ef13e9829..2ea1284a65 100644 --- a/docs/community/documentation/tips-tricks/renaming-files.rst +++ b/docs/community/documentation/tips-tricks/renaming-files.rst @@ -3,7 +3,9 @@ Rename files and adding redirects =================================== -The project supports a redirects file, named ``_redirects``; the format is `source` and `destination` as paths relative to the root of the project. Here's an example:: +The project supports a redirects file, named ``_redirects``; the format is `source` and `destination` as paths relative to the root of the project. Here's an example: + +.. code:: /docs/products/flink/howto/real-time-alerting-solution-cli.html /docs/products/flink/howto/real-time-alerting-solution.html diff --git a/docs/integrations/cloudlogging.rst b/docs/integrations/cloudlogging.rst index f227d960ab..3383b90217 100644 --- a/docs/integrations/cloudlogging.rst +++ b/docs/integrations/cloudlogging.rst @@ -5,9 +5,8 @@ You can send your service logs to Google Cloud Logging to store, search, analyze There are two steps to setting up this integration: -1. Create the Google Cloud Logging integration - -2. Create the integration endpoint +1. Create the Google Cloud Logging integration. +2. Create the integration endpoint. You can do this using either the `Aiven Console `_ or the :doc:`CLI `. @@ -70,14 +69,14 @@ Step 2. Add the integration endpoint to your service 1. Get the endpoint identifier: -.. code-block:: shell + .. code-block:: shell - avn service integration-endpoint-list --project your-project-name + avn service integration-endpoint-list --project your-project-name 2. Use the ``endpoint_id`` to attach the service to the endpoint: -.. code-block:: shell + .. code-block:: shell - avn service integration-create --project your-project-name \ - -t external_google_cloud_logging -s your-service \ - -D + avn service integration-create --project your-project-name \ + -t external_google_cloud_logging -s your-service \ + -D diff --git a/docs/integrations/cloudwatch/cloudwatch-logs-console.rst b/docs/integrations/cloudwatch/cloudwatch-logs-console.rst index 421749d79e..d018e9e422 100644 --- a/docs/integrations/cloudwatch/cloudwatch-logs-console.rst +++ b/docs/integrations/cloudwatch/cloudwatch-logs-console.rst @@ -39,7 +39,7 @@ Send logs from an Aiven service to AWS CloudWatch Follow the steps in this section for each of the services whose logs should be sent to AWS CloudWatch. -1. On the **Service Overview** page, select **Manage integrations** and choose the **AWS CloudWatch Logs** option. +1. On the **Overview** page of your service, select **Integrations** and choose the **Amazon CloudWatch Logs** option. .. 
image:: /images/integrations/cloudwatch-overview-integrations.png :alt: Screenshot of system integrations including AWS CloudWatch Logs diff --git a/docs/integrations/cloudwatch/cloudwatch-metrics.rst b/docs/integrations/cloudwatch/cloudwatch-metrics.rst index 7b9adb1be2..7cb70a7ac0 100644 --- a/docs/integrations/cloudwatch/cloudwatch-metrics.rst +++ b/docs/integrations/cloudwatch/cloudwatch-metrics.rst @@ -45,7 +45,7 @@ Send metrics from an Aiven service to AWS CloudWatch Follow the steps in this section for each of the services whose metrics should be sent to your AWS CloudWatch. -1. From the **Service Overview** page, select **Manage integrations** and choose the **AWS CloudWatch Metrics** option. +1. From the **Overview** page of your service, select **Integrations** and choose the **Amazon CloudWatch Metrics** option. .. image:: /images/integrations/cloudwatch-overview-integrations.png :alt: Screenshot of system integrations including AWS CloudWatch Metrics diff --git a/docs/integrations/datadog.rst b/docs/integrations/datadog.rst index 11d976d1a8..97f12fc233 100644 --- a/docs/integrations/datadog.rst +++ b/docs/integrations/datadog.rst @@ -1,7 +1,7 @@ Datadog and Aiven ================= -`Datadog `_ is a monitoring platform, allowing you to keep an eye on all aspects of your cloud estate. Aiven has integrations that make it easy to include an Aiven service in your Datadog dashboards. +`Datadog `_ is a monitoring platform, allowing you to keep an eye on all aspects of your cloud estate. Aiven has integrations that make it easy to include an Aiven service in your Datadog dashboards. Datadog for metrics ------------------- diff --git a/docs/integrations/datadog/add-custom-tags-to-datadog.rst b/docs/integrations/datadog/add-custom-tags-to-datadog.rst index ff7be0a117..6da9799b37 100644 --- a/docs/integrations/datadog/add-custom-tags-to-datadog.rst +++ b/docs/integrations/datadog/add-custom-tags-to-datadog.rst @@ -36,11 +36,10 @@ To add tags to the service integration: #. Log in to `Aiven Console `_, and select your service. -#. In the service **Overview** page, scroll to **Service integration** and select **Manage integrations**. +#. On the **Overview** page of your service, navigate to the **Service integrations** section and select **Manage integrations**. #. Next to the Datadog integration listed at the top on the Integrations screen, select **Edit** from the drop-down menu (ellipsis). #. Enter the desired tags in the provided field. You can add multiple tags by selecting the **Add** icon and optionally include descriptions for each tag. #. Select **Save configuration** to apply the changes. - diff --git a/docs/integrations/datadog/datadog-logs.rst b/docs/integrations/datadog/datadog-logs.rst index 3b2e3c1426..14ccd65c97 100644 --- a/docs/integrations/datadog/datadog-logs.rst +++ b/docs/integrations/datadog/datadog-logs.rst @@ -66,7 +66,7 @@ Send logs from an Aiven service to Datadog Follow the steps in this section for each of the services whose logs should be sent to Datadog. -1. From the **Service Overview** page, select **Manage integrations** and choose the **Rsyslog** option. +1. On the **Overview** page of your service, select **Integrations** from the sidebar, and select the **Rsyslog** option. .. 
image:: /images/integrations/rsyslog-service-integration.png :alt: Screenshot of system integrations including rsyslog diff --git a/docs/integrations/datadog/datadog-metrics.rst b/docs/integrations/datadog/datadog-metrics.rst index a52e2daf79..ddbd9be795 100644 --- a/docs/integrations/datadog/datadog-metrics.rst +++ b/docs/integrations/datadog/datadog-metrics.rst @@ -35,8 +35,8 @@ Add Datadog metrics integration to your Aiven service To enable the Datadog metrics integration for each service that requires metric tracking in Datadog, follow these steps: 1. In the `Aiven Console `_, select your service. -2. In the service **Overview** page, scroll to **Service integrations** and select **Manage integrations**. -3. In the **Integrations** screen, select **Datadog**. +2. From the **Overview** page of your service, scroll to **Service integrations** and select **Manage integrations**. +3. In the **Integrations** screen, select **Datadog Metrics**. 4. Select the Datadog endpoint you want to use from the drop-down list and select **Enable**. diff --git a/docs/integrations/prometheus-system-metrics.rst b/docs/integrations/prometheus-system-metrics.rst index 185c109275..5a8c62d38f 100644 --- a/docs/integrations/prometheus-system-metrics.rst +++ b/docs/integrations/prometheus-system-metrics.rst @@ -15,7 +15,7 @@ Get a list of available service metrics To discover the metrics available for your services, make an HTTP ``GET`` request to your Prometheus service endpoint. -1. Once your Prometheus integration is configured, collect the following Prometheus service details from `Aiven Console `_ > your service's the **Overview** page > the **Connection information** section > the **Prometheus** tab: +1. Once your Prometheus integration is configured, collect the following Prometheus service details from `Aiven Console `_ > the **Overview** page of your service > the **Connection information** section > the **Prometheus** tab: * Prometheus URL * Username diff --git a/docs/integrations/rsyslog.rst b/docs/integrations/rsyslog.rst index db96b2b7f8..3ace0e7b7d 100644 --- a/docs/integrations/rsyslog.rst +++ b/docs/integrations/rsyslog.rst @@ -83,13 +83,10 @@ Optional: Add rsyslog integration to service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -This can be configured in the Aiven Console by navigating to the target -service overview page and then scrolling down to the **Manage integrations** -button. - +This can be configured in the `Aiven Console `_ by navigating to the **Overview** page of the target service > the **Service integrations** section and selecting **Manage integrations**. You should be able to select your previously configured Rsyslog service -integration by clicking **Use integration** in the modal window. +integration by selecting **Enable** in the modal window. .. image:: /images/integrations/rsyslog-service-integration.png :alt: The page that shows the integrations available for a service diff --git a/docs/integrations/rsyslog/loggly.rst b/docs/integrations/rsyslog/loggly.rst index cd7c700d25..3f063c77d3 100644 --- a/docs/integrations/rsyslog/loggly.rst +++ b/docs/integrations/rsyslog/loggly.rst @@ -34,9 +34,9 @@ To create a Loggly integration using the `Aiven Console `_ for up to date information) + * ``NNNNN`` is Loggly Private Enterprise Number (PEN) which is ``41058`` (check `Loggly documentation `_ for up to date information) * ``your-tag`` with any arbitrary tag value wrapped in double quotes .. 
Tip:: @@ -60,7 +60,7 @@ After enabling this service integration, it will be shown as active in the `Aive .. Note:: - It may take a few moments to setup the new log, and you can track the status in your service overview. + It may take a few moments to set up the new log, and you can track the status on the **Overview** page of your service > the **Service integrations** section. Your logs should now be visible on Loggly **Search** tab. Enter the tag name your previously specified (e.g. ``tag:your-tag`` ) and it will populate the dashboard with the log events from the Aiven service. diff --git a/docs/integrations/rsyslog/logtail.rst b/docs/integrations/rsyslog/logtail.rst index 5d6eb5dc2d..240b5a2f18 100644 --- a/docs/integrations/rsyslog/logtail.rst +++ b/docs/integrations/rsyslog/logtail.rst @@ -1,7 +1,7 @@ Send Aiven logs to Logtail ========================== -`Logtail `_ is a logging service with solid database backing and a cool SQL query interface. You can use the Aiven :doc:`/docs/integrations/rsyslog` to send your logs to Logtail. This article will show you how to set this up. +`Logtail `_ is a logging service with solid database backing and a cool SQL query interface. You can use the Aiven :doc:`/docs/integrations/rsyslog` to send your logs to Logtail. This article will show you how to set this up. 1. Set up an Rsyslog source on Logtail. Choose **Connect source**, give your source a **Name**, and select "Rsyslog" as the **Platform**. diff --git a/docs/integrations/send-logs-to-elasticsearch.rst b/docs/integrations/send-logs-to-elasticsearch.rst index d9c656979e..6e76f24995 100644 --- a/docs/integrations/send-logs-to-elasticsearch.rst +++ b/docs/integrations/send-logs-to-elasticsearch.rst @@ -41,12 +41,11 @@ Send logs to an external service #. Navigate to **Services** from the menu on the left. #. Select the service which logs you want to send to the external Elasticsearch service. -#. On the service *Overview* page scroll to the section *Service integrations*. +#. On the **Overview** page of your service, navigate to the **Service integrations** section. #. Select **Manage integrations**. -#. Select Elasticsearch from the list. -#. In the newly appeared modal window choose the endpoint with name ``CONNECTION_NAME`` from the list and select **ENABLE**. -#. You can now close the modal window. -#. Observe the status change for newly added integration in the section **Service integrations** on the service overview page. +#. Select **Elasticsearch Logs** from the list. +#. In the newly appeared modal window, select the endpoint with name ``CONNECTION_NAME`` from the list and select **ENABLE**. Close the modal window. +#. Observe the status change for the newly added integration in the **Service integrations** section on the **Overview** page of your service. +#. Verify that the logs are flowing into your Elasticsearch. .. note:: Logs are split per day with index name consisting of your desired index prefix and a date in a format year-month-day, for example ``logs-2022-08-30``. diff --git a/docs/platform.rst b/docs/platform.rst deleted file mode 100644 index dd181eafb5..0000000000 --- a/docs/platform.rst +++ /dev/null @@ -1,25 +0,0 @@ -:orphan: - -Aiven platform -================== - -Aiven provides managed open source data technologies on all major clouds. Through Aiven, developers can do what they do best: create applications. Meanwhile, Aiven does what it does best; manage cloud data infrastructure.
- -The Aiven platform consists of the :doc:`Aiven Console `, :doc:`tools `, and Aiven services: - -* Apache Kafka® -* Apache Flink® -* Apache Cassandra® -* ClickHouse® -* Grafana® -* InfluxDB® -* M3 -* MySQL -* OpenSearch® -* PostgreSQL® -* Redis®* - -The services are available in more than 80 regions around the world on AWS, GCP, Microsoft Azure, DigitalOcean, and UpCloud. - -:doc:`Get started with the Aiven platform ` - diff --git a/docs/platform/concepts.rst b/docs/platform/concepts.rst deleted file mode 100644 index 93b2a1a251..0000000000 --- a/docs/platform/concepts.rst +++ /dev/null @@ -1,21 +0,0 @@ -:orphan: - -Concepts -======== - -Learn more about the Aiven platform. - -* :doc:`Get started with the Aiven platform ` - -* :doc:`Billing overvieww ` - -* :doc:`Organizations, units, and projects ` - -* :doc:`Manage users in an organization ` - -* :doc:`Monitoring services ` - -* :doc:`Service integrations ` - -* :doc:`Get support ` - diff --git a/docs/platform/concepts/availability-zones.rst b/docs/platform/concepts/availability-zones.rst index 6971d2024f..fce544ea13 100644 --- a/docs/platform/concepts/availability-zones.rst +++ b/docs/platform/concepts/availability-zones.rst @@ -6,7 +6,7 @@ About availability zones Availability zones (AZs) are physically isolated locations (data centers) where cloud services operate. There are multiple AZs within a region, each with independent power, cooling, and network infrastructure. The choice of AZs is usually affected by the latency/proximity to customers, compliance, SLA, redundancy/data security requirements, and cost. All AZs in a region are interconnected for an easy resource replication and application partitioning. -.. _cross-zone data distro: +.. _cross-zone-data-distro: Cross-availability-zone data distribution ----------------------------------------- @@ -62,7 +62,7 @@ Aiven supports a subset of existing Azure cloud regions with availability zones. - ``azure-westeurope`` - ``azure-westus2`` -Related reading +Related pages --------------- - :doc:`PostgreSQL® backups ` diff --git a/docs/platform/concepts/byoc.rst b/docs/platform/concepts/byoc.rst index 32b45c1eea..480f6664da 100644 --- a/docs/platform/concepts/byoc.rst +++ b/docs/platform/concepts/byoc.rst @@ -6,17 +6,19 @@ About BYOC Aiven services are usually deployed on Aiven-managed infrastructure, using Aiven-managed security protocols, and backed by Aiven-managed storage and backups. This provides the most seamless straightforward de-risked approach to deploying Aiven services. However, you might need a different configuration if your business, project, or organization has specific requirements for strict regulatory compliance, fine-grained network access control, or cloud purchase commitments in place, for instance. -This is where the bring your own cloud (BYOC) feature comes in enabling you to use your own cloud infrastructure instead of using the Aiven-managed infrastructure. With BYOC, your Aiven organization gets connected with your cloud provider account by creating custom clouds in your Aiven organization. This allows you to manage your infrastructure on the Aiven platform while keeping your data in your own cloud. +This is where the bring your own cloud (BYOC) feature comes in enabling you to use your own cloud infrastructure instead of using the Aiven-managed infrastructure. With BYOC, your Aiven organization gets connected with your cloud provider account by creating custom clouds in your Aiven organization. 
A custom cloud is a secure environment within your cloud provider account to run Aiven-managed data services. By enabling BYOC, creating custom clouds, and setting up Aiven services within the custom clouds, you can manage your infrastructure on the Aiven platform while keeping your data in your own cloud. Why use BYOC ------------ -There a few major reasons to utilize BYOC: +Consider using BYOC and custom clouds if you have specific business needs or project requirements. There are a few major reasons to use BYOC: -1. **Compliance**: Aiven offers managed environments for several standard compliance regulations, such as HIPAA, PCI DSS, and GDPR. However, if you have strict regulatory requirements or special compliance requirements, BYOC may be the best option for you. -2. **Network auditing**: If you require the visibility of all traffic within any VPC you operate in or need frequent auditing capabilities, BYOC is potentially a good fit. BYOC gives you the ability to audit network metadata but not the actual contents. -3. **Fine-grained network control**: BYOC requires only some specific network access (for example, service management and troubleshooting), otherwise allowing you to customize your network to meet any internal requirements or requirements of your customers. -4. **Cost optimization**: Depending on your cloud provider, with BYOC you can use cost savings plans, committed use discounts, or other strategies to save on compute and storage infrastructure costs related to Aiven services. +* **Compliance**: Aiven offers managed environments for several standard compliance regulations, such as HIPAA, PCI DSS, and GDPR. However, if you have strict regulatory requirements or special compliance requirements, BYOC may be the best option for you. +* **Network auditing**: If you require the visibility of all traffic within any VPC you operate in or need frequent auditing capabilities, BYOC is potentially a good fit. BYOC gives you the ability to audit network metadata but not the actual contents. +* **Fine-grained network control**: BYOC only requires specific network access for Aiven (for example, service management or troubleshooting) to deploy and manage open source data services, otherwise allowing you to customize your network to meet any internal requirements or requirements of your customers. +* **Cost optimization**: Depending on your cloud provider, with BYOC you can use cost savings plans, committed use discounts, or other strategies to save on compute and storage infrastructure costs related to Aiven services. + +.. _eligible-for-byoc: Who is eligible for BYOC ------------------------ @@ -24,8 +26,8 @@ Who is eligible for BYOC The BYOC setup is a bespoke service offered on a case-by-case basis, and not all cloud providers support it yet. You need to meet a few requirements to be eligible for BYOC: - You use one of the following public clouds: Amazon Web Services (AWS), Google Cloud Platform (GCP), or Microsoft Azure (excluding Azure Germany). -- Your total monthly spend is greater than $5,000. -- You have an active enterprise support contract. +- Your total monthly spend is at least $5,000.00 for at least 12 months. +- You use at least the `Priority tier of Aiven support services `_. 
When to use the regular Aiven deployment ---------------------------------------- diff --git a/docs/platform/concepts/database-forking.rst b/docs/platform/concepts/database-forking.rst deleted file mode 100644 index 4d14f29436..0000000000 --- a/docs/platform/concepts/database-forking.rst +++ /dev/null @@ -1,10 +0,0 @@ -:orphan: - -.. raw:: html - - - -This page is a redirect from ``database-forking`` to ``service-forking``, since the original name (``database-forking``) was incorrect. -It's part of `PR #710 `_ \ No newline at end of file diff --git a/docs/platform/concepts/enhanced-compliance-env.rst b/docs/platform/concepts/enhanced-compliance-env.rst index f7095d17bf..ee0a4e8a20 100644 --- a/docs/platform/concepts/enhanced-compliance-env.rst +++ b/docs/platform/concepts/enhanced-compliance-env.rst @@ -50,24 +50,31 @@ Environment: The necessary peering information to enable the peer from our end. This differs between clouds: -AWS: - * AWS account ID - * VPC ID -GCP: - * GCP Project ID - * VPC Network Name -Azure: - * Azure Tenant ID - * Azure App ID - * Azure VNet ID +**AWS:** + +* AWS account ID +* VPC ID + +**GCP:** + +* GCP Project ID +* VPC Network Name + +**Azure:** + +* Azure Tenant ID +* Azure App ID +* Azure VNet ID What compliances are covered? -------------------------------- Although not exhaustive, Aiven is capable of supporting both the Health Insurance Portability and Accountability Act (HIPAA) and the Payment Card Industry Data Security Standard (PCI DSS) -compliances. If you require compliance beyond these please contact our sales department so we +compliances. + +If you require compliance beyond these please contact our sales department so we can better understand your specific needs. Additionally, we also offer an alternative deployment -option -- :doc:`Bring Your Own Cloud (BYOC) `. +option. See :doc:`Bring Your Own Cloud (BYOC) `. 
Migrating ---------------- diff --git a/docs/platform/concepts/free-plan.rst b/docs/platform/concepts/free-plan.rst index 887e3e2b0f..c4ae5e5ba8 100644 --- a/docs/platform/concepts/free-plan.rst +++ b/docs/platform/concepts/free-plan.rst @@ -23,10 +23,11 @@ Free plans include: * Monitoring for metrics and logs * Backups * Integrations between different Aiven services including free, paid, and trial services -* AWS hosting in a limited number of regions: - * EMEA: aws-eu-north-1, aws-eu-west-1, aws-eu-west-2, aws-eu-west-3 - * Americas: aws-us-east-1, aws-us-east-2, aws-us-west-2, aws-ca-central-1 - * APAC: aws-ap-south-1 +* DigitalOcean hosting in a limited number of regions: + + * EMEA: do-ams (Amsterdam), do-ldn (London), do-fra (Frankfurt) + * Americas: do-nyc (New York), do-sfo (San Francisco), do-tor (Toronto) + * APAC: do-blr (Bangalore) There are some limitations of the free plan services: @@ -35,7 +36,7 @@ There are some limitations of the free plan services: * No forking * For PostgreSQL: no connection pooling * Support only through the `Aiven Community Forum `_ -* Only a limited number of AWS regions, no other cloud providers +* Only a limited number of DigitalOcean regions, no other cloud providers * Only one service per service type per user and :doc:`organization ` * Not covered under Aiven's 99.99% SLA diff --git a/docs/platform/concepts/logs-metrics-alerts.rst b/docs/platform/concepts/logs-metrics-alerts.rst index 8fd9273c08..67658bc754 100644 --- a/docs/platform/concepts/logs-metrics-alerts.rst +++ b/docs/platform/concepts/logs-metrics-alerts.rst @@ -6,12 +6,12 @@ Administrators can configure log and metrics integrations to Aiven services so t Logs ---- -Choose **Manage integrations** from the service overview page to add an integration that will send service logs to an Aiven for OpenSearch® service. This can be an existing service, or you can choose to create a new one. +On the **Overview** page of your service, select **Integrations** to add an integration that will send service logs to an Aiven for OpenSearch® service. This can be an existing service, or you can choose to create a new one. Metrics ------- -On the service overview page, choose **Manage integrations** to set up an integration to push service metrics to an M3, InfluxDB® or PostgreSQL® service on Aiven. This can be an existing service or you can create a new one to receive the metrics. +On the **Overview** page of your service, select **Integrations** to set up an integration to push service metrics to an M3, InfluxDB® or PostgreSQL® service on Aiven. This can be an existing service or you can create a new one to receive the metrics. Dashboards ---------- diff --git a/docs/platform/concepts/maintenance-window.rst b/docs/platform/concepts/maintenance-window.rst index 5c33768754..8abbad37bb 100644 --- a/docs/platform/concepts/maintenance-window.rst +++ b/docs/platform/concepts/maintenance-window.rst @@ -1,5 +1,8 @@ +Service maintenance +=================== + Maintenance window -================== +------------------ The **maintenance window** is a time window during which the nodes behind Aiven services are switched to new upgraded version and, once the process is completed, the overall URI DNS name is pointed at the new location. 
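As an illustrative aside, the maintenance window can also be viewed and moved outside the console. A minimal sketch with the `Aiven CLI `_, assuming the ``avn`` client is installed and authenticated and ``my-service`` is a placeholder service name (verify the exact flags with ``avn service update -h``):

.. code-block:: bash

   # Show the current maintenance window (day of week and start time) for a service
   avn service get my-service --json | grep -A 4 '"maintenance"'

   # Move the maintenance window to Sundays at 02:00 UTC
   avn service update my-service --maintenance-dow sunday --maintenance-time 02:00:00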
@@ -22,14 +25,23 @@ In case of **Apache Kafka®** and **OpenSearch®** the service DNS address resol Starting with Aiven for OpenSearch® versions 1.3.13 and 2.10, OpenSearch Dashboards will remain available during a maintenance update that also consists of version updates to your Aiven for OpenSearch service. Maintenance updates -~~~~~~~~~~~~~~~~~~~ +------------------- Security updates, platform updates that affect reliability or stability of the service nodes, and quarterly patch updates are always mandatory. Other updates are initially optional. Advance notice is given for all updates. After optional updates have been available for six months, they become mandatory and are applied on the next week's maintenance window at the earliest. This means you have at least 7 days advance notice with exception of critical security updates. These critical updates are applied in the maintenance window of the current week. During service upgrades, maintenance updates are automatically applied and do not require any action from you. You can view maintenance updates pending for your service using the following: -- Service's **overview** page in `Aiven Console `_ +- `Aiven Console `_ > **Service settings** page > **Service management** section - ``avn service get`` command in `Aiven CLI `_ - ``service`` endpoint in `Aiven REST API `_ + +Periodic infrastructure updates +-------------------------------- + +Maintenance updates are scheduled automatically for services with nodes active for 180 days and more. + +.. important:: + + Periodic infrastructure updates are mandatory for all the services except for those with maintenance disabled. diff --git a/docs/platform/concepts/managed-users.rst b/docs/platform/concepts/managed-users.rst new file mode 100644 index 0000000000..cace6b3200 --- /dev/null +++ b/docs/platform/concepts/managed-users.rst @@ -0,0 +1,10 @@ +Managed users +============== + +The managed users feature provides a centralized way of managing all of your organization's users, including editing their profiles, resetting passwords, and :doc:`setting authentication policies `. + +When you :doc:`verify a domain `, existing organization users automatically become managed users. + +A managed user cannot create new organizations unless they are a :doc:`super admin ` of the organization that they are managed by. + +To see a list of all users in your organization go to **Admin** and select **Users**. \ No newline at end of file diff --git a/docs/platform/concepts/service-level-agreement.rst b/docs/platform/concepts/service-level-agreement.rst deleted file mode 100644 index 9a1799dd11..0000000000 --- a/docs/platform/concepts/service-level-agreement.rst +++ /dev/null @@ -1,6 +0,0 @@ -Service level agreement -======================= - -The Aiven service level agreement (SLA) details can be found at `aiven.io/sla `_. - -Custom SLAs are available for premium plans. Contact us at sales@Aiven.io for more details. diff --git a/docs/platform/concepts/service-memory-limits.rst b/docs/platform/concepts/service-memory-limits.rst index 4ec4e7fc7b..8af1547cfb 100644 --- a/docs/platform/concepts/service-memory-limits.rst +++ b/docs/platform/concepts/service-memory-limits.rst @@ -10,9 +10,9 @@ The practical memory limit will always be less than the service physical memory A server (or node) **usable memory** can be calculated as: - |vm_usable_memory| +|vm_usable_memory| -.. important:: This ``overhead`` is currently calculated as: |vm_overhead| +.. important:: ``overhead`` is calculated as: |vm_overhead|. 
Services may utilize optional components, service integrations, connection pooling, or plug-ins, which are not included in overhead calculations. diff --git a/docs/platform/concepts/service_backups.rst b/docs/platform/concepts/service_backups.rst index 6742a91e66..72739e18a4 100644 --- a/docs/platform/concepts/service_backups.rst +++ b/docs/platform/concepts/service_backups.rst @@ -1,7 +1,7 @@ Backups at Aiven ================ -This article provides information on general rules for handling service backups in Aiven. It also covers service-specific backup details, such as backup frequency and retention period per service. Learn about our backup-restore strategies for powering-off/on services and find out if Aiven allows accessing backups. +On top of general rules for handling service backups in Aiven, there are service-specific backup details, such as backup frequency and retention period per service. Backup policies for service power-off/on and service deletion are common for all the services, similarly as the backup access policy. About backups at Aiven ---------------------- @@ -15,7 +15,7 @@ Aiven takes service backups for managing purposes. These backups are compressed .. note:: If you change a cloud provider or an availability zone for your service, its backups are not migrated from their original location. Service power-off/on backup policy ------------------------------------- +---------------------------------- Whenever a service is powered on from a powered-off state, the latest available backup is restored. @@ -23,6 +23,31 @@ Services that have been powered off for more than 180 days are reviewed. A notif If you wish to keep the powered-off service for more than 180 days, power on the service and then power it off again to avoid the routine cleanup. +Service backup deletion policy +------------------------------ + +For services that have been deleted for over 41 days, all the backups are automatically deleted and, hence, no longer available. + +Access to backups +----------------- + +The Aiven platform takes care of all maintenance operations required for running complex software at scale, allowing you to focus on using your services. The open-source tools used for service backups can be leveraged in your own infrastructure. + +Since service backups are encrypted and stored in the object storage, accessing them is not possible. If you do need to backup your service, use the standard tooling for this service. + +Recommended backup tools per service are as follows: + +* `PostgreSQL® `__: ``pgdump`` +* `MySQL® `_: ``mysqldump`` +* `Redis®* `_: ``redis-cli`` +* `Cassandra® `_: ``cqlsh`` +* `OpenSearch® `_: ``elasticdump`` +* `InfluxDB® `_: ``influxd`` + +.. note:: + + The listed backup tools are merely recommendations and are not intended to create a snapshot of your Aiven service but to provide access to the data. + Backup profile per service -------------------------- @@ -95,7 +120,7 @@ For Aiven for PostgreSQL, full daily backups are taken, and WAL segments are con You can supplement this with a remote read-only replica service, which you can run in a different cloud region or with another cloud provider and promote to master if needed. -To shift the backup schedule to a new time, you can modify the backup time configuration option in **Advanced configuration** in `Aiven Console `_ (the service's **Overview** page). If a recent backup has been taken, it may take another backup cycle before the new backup time takes effect. 
+To shift the backup schedule to a new time, you can modify the backup time configuration option in **Advanced configuration** in `Aiven Console `_ (the service's **Service settings** page). If a recent backup has been taken, it may take another backup cycle before the new backup time takes effect. .. seealso:: @@ -110,7 +135,7 @@ Aiven for MySQL® Aiven for MySQL databases are automatically backed up with full daily backups and binary logs recorded continuously. All backups are encrypted with the open source `myhoard `_ software. Myhoard uses `Percona XtraBackup `_ internally for taking full (or incremental) snapshots for MySQL. -To shift the backup schedule to a new time, you can modify the backup time configuration option in **Advanced configuration** in `Aiven Console `_ (the service's **Overview** page). If a recent backup has been taken, it may take another backup cycle before the new backup time takes effect. +To shift the backup schedule to a new time, you can modify the backup time configuration option in **Advanced configuration** in `Aiven Console `_ (the service's **Service settings** page). If a recent backup has been taken, it may take another backup cycle before the new backup time takes effect. .. seealso:: @@ -144,7 +169,7 @@ Aiven for Redis backups are taken every 12 hours. For persistence, Aiven supports Redis Database Backup (RDB). -You can control the persistence feature using ``redis_persistence`` under **Advanced configuration** in `Aiven Console `_ (the service's **Overview** page): +You can control the persistence feature using ``redis_persistence`` under **Advanced configuration** in `Aiven Console `_ (the service's **Service settings** page): * When ``redis_persistence`` is set to ``rdb``, Redis does RDB dumps every 10 minutes if any key is changed. Also, RDB dumps are done according to the backup schedule for backup purposes. * When ``redis_persistence`` is ``off``, no RDB dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason or if the service is powered off. This also means the service can't be forked. @@ -173,27 +198,4 @@ Aiven for ClickHouse backups contain database lists, table schemas, table conten .. seealso:: - For more information on Aiven for ClickHouse backups, see :ref:`Backup and restore `. - -Access to backups ------------------ - -The Aiven platform takes care of all maintenance operations required for running complex software at scale, allowing you to focus on using your services. The open-source tools used for service backups can be leveraged in your own infrastructure. - -The Aiven platform is designed to handle the operational aspects of running complex software at scale, allowing you to focus on using the services instead of maintaining them. Aiven handles service availability, security, connectivity, and backups. - -Since service backups are encrypted and stored in the object storage, accessing them is not possible. If you do need to backup your service, use the standard tooling for this service. - - -Recommended backup tools per service are as follows: - -* `PostgreSQL `__: ``pgdump`` -* `MySQL `_: ``mysqldump`` -* `Redis `_: ``redis-cli`` -* `Cassandra `_: ``cqlsh`` -* `OpenSearch `_: ``elasticdump`` -* `InfluxDB `_: ``influxd`` - -.. note:: - - The listed backup tools are merely recommendations and are not intended to create a snapshot of your Aiven service but to provide access to the data. + For more information on Aiven for ClickHouse backups, see :doc:`Backup and restore `. 
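To illustrate the tooling recommended under **Access to backups** above, here is a minimal sketch of taking your own logical copy of an Aiven for PostgreSQL® service with the standard ``pg_dump``/``pg_restore`` clients; the connection URIs are placeholders, so substitute the values shown in your service's connection information:

.. code-block:: bash

   # Dump the default database over TLS into a local custom-format archive
   pg_dump 'postgres://avnadmin:PASSWORD@HOST:PORT/defaultdb?sslmode=require' \
     --format=custom --file=defaultdb.dump

   # Restore the archive into another PostgreSQL database when needed
   pg_restore --dbname='postgres://USER:PASSWORD@TARGET_HOST:PORT/targetdb?sslmode=require' defaultdb.dump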
diff --git a/docs/platform/concepts/tls-ssl-certificates.rst b/docs/platform/concepts/tls-ssl-certificates.rst index e900b1e9de..757c213059 100644 --- a/docs/platform/concepts/tls-ssl-certificates.rst +++ b/docs/platform/concepts/tls-ssl-certificates.rst @@ -17,11 +17,11 @@ Certificate requirements Most of our services use a browser-recognized CA certificate, but there are exceptions: -- **Aiven for PostgreSQL®** requires the Aiven project CA certificate to connect when using `verify-ca` or `verify-full` as ``sslmode``. The first mode requires the client to verify that the server certificate is actually emitted by the Aiven CA, while the second provides maximum security by performing HTTPS-like validation on the hostname as well. The default ``sslmode``, `require`, ensures TLS is used when connecting to the database, but does not verify the server certificate. For more information, see the `PostgreSQL documentation `_ +- **Aiven for PostgreSQL®** requires the Aiven project CA certificate to connect when using `verify-ca` or `verify-full` as ``sslmode``. The first mode requires the client to verify that the server certificate is actually emitted by the Aiven CA, while the second provides maximum security by performing HTTPS-like validation on the hostname as well. The default ``sslmode``, ``require``, ensures TLS is used when connecting to the database, but does not verify the server certificate. For more information, see the `PostgreSQL documentation `_ - **Aiven for Apache Kafka®** requires the Aiven project CA certificate, and also the client key and certificate. -For these services you can :doc:`/docs/platform/howto/download-ca-cert` from the service overview page. +For these services you can :doc:`/docs/platform/howto/download-ca-cert` from **Overview** page of your service. .. note:: Older/existing services may be using the Aiven project's CA, you can request switching to a browser-recognized certificate by opening support ticket and letting us know. diff --git a/docs/platform/howto.rst b/docs/platform/howto.rst deleted file mode 100644 index 127b82935d..0000000000 --- a/docs/platform/howto.rst +++ /dev/null @@ -1,19 +0,0 @@ -:orphan: - -How-To -======= - -Find instructions for common Aiven platform tasks for the following categories: - -* :doc:`Billing and payments ` - -* :doc:`Organizations, units, and projects `. - -* :doc:`Service management ` - -* :doc:`Networking and security ` - -* :doc:`Monitoring and logs ` - -* :doc:`Integrations ` - diff --git a/docs/platform/howto/add-authentication-method.rst b/docs/platform/howto/add-authentication-method.rst index 165ad3d6ae..1ca5f2d334 100644 --- a/docs/platform/howto/add-authentication-method.rst +++ b/docs/platform/howto/add-authentication-method.rst @@ -10,7 +10,4 @@ To add an authentication method for your user account in the `Aiven Console `. \ No newline at end of file +After authorizing access, the new method is shown in the list. \ No newline at end of file diff --git a/docs/platform/howto/add-storage-space.rst b/docs/platform/howto/add-storage-space.rst index 3def20a8f0..3df3da1831 100644 --- a/docs/platform/howto/add-storage-space.rst +++ b/docs/platform/howto/add-storage-space.rst @@ -4,6 +4,7 @@ Add or remove storage With :doc:`dynamic disk sizing `, you can add or remove disk storage (by factor of 10 GiB) both when you create a service and later for a running service. .. 
note:: + - You cannot add or remove storage when service nodes are in the rebalancing state, for example, during a maintenance update or a service upgrade. - This feature is not available for all service plans. @@ -21,19 +22,17 @@ Add storage to a running service You can add storage to your running service in `Aiven Console `_ without interrupting the service. -1. Log in to `Aiven Console `_, and go to your project. +1. Log into the `Aiven Console `_, select your project and then select the service for which you want to add storage. +2. On the service page, click **Service settings** from the sidebar. +3. In the **Service plan** section, click **Actions (...)**. +4. From the dropdown menu, select **Manage additional storage**. +5. In the **Upgrade service storage** dialog, click **Change plan** to choose the new service plan and tier, or use the slider to add disk storage. -2. On the **Services** page, select your service. - -3. On the **Overview** page of your service, go to the **Service plan** section, and select **Add storage**. - -4. In the **Upgrade service storage** window, use the slider to add disk storage. - -.. note:: + .. note:: The price shown for the additional storage includes backup costs. -5. Select **Save changes**. +6. Click **Save changes**. .. topic:: Result @@ -57,18 +56,16 @@ Before you start Remove added storage """""""""""""""""""" -#. Log in to `Aiven Console `_, and go to your project. - -#. On the **Services** page, select your service. - -#. On the **Overview** page of your service, go to the **Service plan** section, and select **Edit** next to **Additional disk storage**. - -#. In the **Upgrade service storage** window, use the slider to remove disk storage. +1. Log into the `Aiven Console `_, select your project and then select the service from which you want to remove storage. +2. On the service page, click **Service settings** from the sidebar. +3. In the **Service plan** section, click **Actions (...)**. +4. From the dropdown menu, select **Manage additional storage**. +5. In the **Upgrade service storage** dialog, use the slider to remove disk storage. .. note:: You can only remove storage that you previously added using this feature. If you want to downgrade further, you can :doc:`change your service plan `. +6. Click **Save changes**. .. topic:: Result @@ -87,4 +84,5 @@ For example, if you use a ``Startup-4`` plan with a 80-GiB disk by default and y .. note:: - When you perform a service upgrade or downgrade horizontally, remember to include all additional disks the service uses. For example, when switching from ``Startup-4`` to ``Business-4`` or from ``Business-4`` to ``Startup-4``, include all the additional disks available for this service. + - Similarly, when you fork an existing service, include all additional disks the service uses. \ No newline at end of file diff --git a/docs/platform/howto/billing-google-cloud-platform-marketplace-subscription.rst b/docs/platform/howto/billing-google-cloud-platform-marketplace-subscription.rst index aeaa655e95..5201f5898d 100644 --- a/docs/platform/howto/billing-google-cloud-platform-marketplace-subscription.rst +++ b/docs/platform/howto/billing-google-cloud-platform-marketplace-subscription.rst @@ -8,15 +8,15 @@ First, there are some steps that need to be completed on the Google Cloud Market Google Cloud Marketplace setup ------------------------------ -1. Navigate to `Aiven Managed Database Services on the Google Cloud Marketplace `_.
This page contains information about all of Aiven's services and how the marketplace subscription works. Click the **Subscribe** button on this page. +#. Navigate to `Aiven Managed Database Services on the Google Cloud Marketplace `_. This page contains information about all of Aiven's services and how the marketplace subscription works. Click the **Subscribe** button on this page. -2. Select your desired billing account, then read and agree to the terms and conditions. +#. Select your desired billing account, then read and agree to the terms and conditions. -3. When you are ready, click the **Subscribe** button at the bottom of the page. You will NOT be charged by clicking this button; this only sets up a billing subscription between GCP and Aiven. You will only be charged after deploying Aiven services. +#. When you are ready, click the **Subscribe** button at the bottom of the page. You will NOT be charged by clicking this button; this only sets up a billing subscription between GCP and Aiven. You will only be charged after deploying Aiven services. -4. You should now see a message that says "Your order request has been sent to Aiven". Click on the **Go to product page** button. +#. You should now see a message that says "Your order request has been sent to Aiven". Click on the **Go to product page** button. -5. Everything is now complete in your GCP account, but you still need to setup the Aiven account. Click on the **Manage on provider** button to go to the Aiven console to complete the process. +#. Everything is now complete in your GCP account, but you still need to setup the Aiven account. Click on the **Manage on provider** button to go to the Aiven console to complete the process. .. image:: /images/platform/howto/gcp-manage-on-provider.png :alt: Google Cloud Marketplace page after subscribing, showing the "Manage on provider" button @@ -25,14 +25,15 @@ Google Cloud Marketplace setup Aiven account setup ------------------- -6. You should now be on a signup page at Aiven, asking you for your email address to create a new account. +#. You should now be on a signup page at Aiven, asking you for your email address to create a new account. -7. After entering your email address, you will be sent an email to confirm your registration. Click on the link. +#. After entering your email address, you will be sent an email to confirm your registration. Click on the link. -8. You can now proceed to the `Aiven console for GCP `_, where you can manage your Aiven services as normal. +#. You can now proceed to the `Aiven console for GCP `_, where you can manage your Aiven services as normal. .. image:: /images/platform/howto/gcp-console.png :alt: The GCP version of the Aiven web console + .. note:: Note the URL is https://console.gcp.aiven.io - this uses a different account system than https://console.aiven.io. If you have an existing Aiven account you will need to create a new Aiven GCP account using the Aiven GCP console, and when coming back to Aiven in the future, you will need to use https://console.gcp.aiven.io to login. 
diff --git a/docs/platform/howto/byoc/add-customer-info-custom-cloud.rst b/docs/platform/howto/byoc/add-customer-info-custom-cloud.rst index 7d53817404..1ab0fcc93a 100644 --- a/docs/platform/howto/byoc/add-customer-info-custom-cloud.rst +++ b/docs/platform/howto/byoc/add-customer-info-custom-cloud.rst @@ -1,11 +1,11 @@ -Add or remove customer contacts for your custom cloud -===================================================== +Add or remove customer contacts for your AWS custom cloud in Aiven +================================================================== -.. important:: +Update the list of customer contacts for your :doc:`custom cloud `. - Creating custom clouds in your Aiven organization requires enabling :doc:`the bring your own cloud (BYOC) feature `, which is a :doc:`limited availability feature `. If you're interested in trying it out, contact the sales team at `sales@Aiven.io `_. +.. important:: -This article details how to update the list of customer contacts for your custom cloud using `Aiven Console `_. + Custom cloud configuration in Aiven is an :doc:`early availability feature `. You cover the costs associated with building and maintaining your custom cloud: payments for your integrated AWS infrastructure and Aiven services within the custom cloud. About updating customer contacts -------------------------------- @@ -23,16 +23,18 @@ Update the contacts list ------------------------ 1. Log in to `Aiven Console `_ as an administrator. -2. From the left sidebar, select **Bring your own cloud**. -3. In the **Bring you own cloud** view, select one of the clouds available on the list. -4. In the selected cloud's page, use the ellipsis (**...**) menu in the top right corner to select **Customer contact**. -5. In the **Customer contact** window, select a new contact's role from the dropdown menu, enter the email address, and select **+** to add the provided contact's details. +2. Select the organization you want to use from the dropdown menu in the top right corner. +3. From the top navigation bar, select **Admin**. +4. From the left sidebar, select **Bring your own cloud**. +5. In the **Bring your own cloud** view, select one of the clouds available on the list. +6. In the selected cloud's page, use the ellipsis (**...**) menu in the top right corner to select **Customer contact**. +7. In the **Customer contact** window, select a new contact's role from the dropdown menu, enter the email address, and select **+** to add the provided contact's details. .. note:: You can add multiple customer contacts for your custom cloud. -6. When you're done adding all the contacts, select **Save changes**. +8. When you're done adding all the contacts, select **Save changes**. .. topic:: Result @@ -44,13 +46,15 @@ Check it out You can preview the updated list of contacts by taking the following steps: 1. Log in to `Aiven Console `_ as an administrator. -2. From the left sidebar, select **Bring your own cloud**. -3. In the **Bring you own cloud** view, select one of the clouds available on the list. -4. In the selected cloud's page, use the ellipsis (**...**) menu in the top right corner. -5. Select **Customer contact** from the options available on the the ellipsis (**...**) menu. - -Related reading ---------------- +2. Select the organization you want to use from the dropdown menu in the top right corner. +3. From the top navigation bar, select **Admin**. +4. From the left sidebar, select **Bring your own cloud**. +5. 
In the **Bring your own cloud** view, select one of the clouds available on the list. +6. In the selected cloud's page, use the ellipsis (**...**) menu in the top right corner. +7. Select **Customer contact** from the options available on the ellipsis (**...**) menu. + +Related pages +------------- * :doc:`Bring your own cloud ` * :doc:`Create a custom cloud in Aiven ` diff --git a/docs/platform/howto/byoc/assign-project-custom-cloud.rst b/docs/platform/howto/byoc/assign-project-custom-cloud.rst index afc5124763..d04b9240c0 100644 --- a/docs/platform/howto/byoc/assign-project-custom-cloud.rst +++ b/docs/platform/howto/byoc/assign-project-custom-cloud.rst @@ -1,16 +1,27 @@ -Assign a project to your custom cloud -===================================== +Enable your AWS custom cloud in Aiven organizations, units, or projects +======================================================================= + +To be able to use a :doc:`custom cloud ` in your Aiven organizations, units, or projects, you need to configure its availability. .. important:: - Creating custom clouds in your Aiven organization requires enabling :doc:`the bring your own cloud (BYOC) feature `, which is a :doc:`limited availability feature `. If you're interested in trying it out, contact the sales team at `sales@Aiven.io `_. + Custom cloud configuration in Aiven is an :doc:`early availability feature `. You cover the costs associated with building and maintaining your custom cloud: payments for your integrated AWS infrastructure and Aiven services within the custom cloud. + +About making custom clouds available from your projects +------------------------------------------------------- + +With the BYOC feature enabled, you can :doc:`create custom clouds ` in your Aiven organization. As a part of the :doc:`initial custom cloud's setup in Aiven Console `, you select in what projects you'll be able to use your new custom cloud to create services. You decide if you want to make your cloud available for all the projects in your organization, selected organizational units, or specific projects only. + +Later, you can come back to the **Available projects** tab in your cloud's page in `Aiven Console `_ and update the settings you configured during the :doc:`initial custom cloud's setup `. + -This article details how to update the list of projects assigned to your custom cloud using `Aiven Console `_. +1. In the **Custom cloud's availability in your organization** section, select either: -About assigning projects to your custom cloud ---------------------------------------------- + * **By default for all projects** to make your custom cloud available in all existing and future projects in the organization, or -With the BYOC feature enabled, you can :doc:`create custom clouds ` in your Aiven organizations. While :doc:`setting up a custom cloud in Aiven `, you add projects for this cloud, which is a part of the initial custom cloud's configuration. Later, you can come back to the **Projects availability** tab in your cloud's page in `Aiven Console `_ and update the projects list you initially created for your cloud. + * **By selection** to pick specific projects or organizational units where you want your custom cloud to be available. + +2. If you choose the **By selection** option, the **Assign organizational units** field and the **Assign projects** field show up. Enter the names of organizational units and/or projects in which you want to be able to use your custom cloud.
Prerequisites ------------- @@ -19,34 +30,45 @@ Prerequisites * At least one :doc:`custom cloud created ` in your Aiven organization * Access to `Aiven Console `_ -Assign projects ---------------- +Enable projects to use your custom cloud +---------------------------------------- 1. Log in to `Aiven Console `_ as an administrator. -2. From the left sidebar, select **Bring your own cloud**. -3. In the **Bring you own cloud** view, select one of the clouds available on the list. -4. In the selected cloud's page, navigate to the **Projects availability** tab and select **Assign projects**. -5. In the **Assign projects** window, use the dropdown menu to select a project you want to assign to your cloud. -6. Confirm your choice by selecting **Assign projects**. +2. Select the organization you want to use from the dropdown menu in the top right corner. +3. From the top navigation bar, select **Admin**. +4. From the left sidebar, select **Bring your own cloud**. +5. In the **Bring your own cloud** view, select one of the clouds available on the list. +6. In the selected cloud's page, navigate to the **Available projects** tab and modify the settings provided as needed: + + * Select **Set availability** to decide if your custom cloud is available in all the projects in your organization or in selected projects only. In the **Custom cloud's availability in your organization** window, select either **By default for all projects** or **By selection**. If you choose the **By selection** option, dropdown menus **Assign organizational units** and **Assign projects** show up. Use them to select the desired organizational units and/or projects and confirm your choice by selecting **Save**. + + .. note:: + + By selecting an organizational unit, you make your custom cloud available from all the projects in this unit. + + * Select **Assign projects** to enable your custom cloud in specific organizational units and/or projects. In the **Assign projects** window, use the available dropdown menus to select the desired units and/or projects as needed. Confirm your choice by selecting **Assign projects**. .. topic:: Result - Another project has been added to your custom cloud. + In the projects and/or organizational units you assigned, you can create services using your custom cloud. Check it out ------------ -You can preview the updated list of assigned projects by taking the following steps: +You can verify that the cloud availability changes you made are live by taking the following steps: 1. Log in to `Aiven Console `_ as an administrator. -2. From the left sidebar, select **Bring your own cloud**. -3. In the **Bring you own cloud** view, select one of the clouds available on the list. -4. In the selected cloud's page, navigate to the **Projects availability** tab. +2. Select the organization you want to use from the dropdown menu in the top right corner. +3. From the top navigation bar, select **Admin**. +4. From the left sidebar, select **Bring your own cloud**. +5. In the **Bring your own cloud** view, select one of the clouds available on the list. +6. In the selected cloud's page, navigate to the **Available projects** tab and check the available projects and organizational units list for the updates you made.
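As an optional, purely illustrative check from the `Aiven CLI `_ (not part of the console flow above), you can confirm that a service can be created in one of the assigned projects; the project name, service name, and the name under which your custom cloud is exposed are all placeholders here, so copy the real cloud name from the console's region selector:

.. code-block:: bash

   # List the clouds visible to an assigned project; the custom cloud should be included
   avn cloud list --project my-assigned-project

   # Create a service on the custom cloud (placeholder names throughout)
   avn service create my-pg \
     --project my-assigned-project \
     --service-type pg \
     --plan startup-4 \
     --cloud my-custom-cloud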
-Related reading ---------------- +Related pages +------------- * :doc:`Bring your own cloud ` +* :doc:`Enable the bring your own cloud (BYOC) feature ` * :doc:`Create a custom cloud in Aiven ` * :doc:`Add customer's contact information for your custom cloud ` * :doc:`Rename your custom cloud ` diff --git a/docs/platform/howto/byoc/create-custom-cloud.rst b/docs/platform/howto/byoc/create-custom-cloud.rst index 7b218ade5f..0160e63224 100644 --- a/docs/platform/howto/byoc/create-custom-cloud.rst +++ b/docs/platform/howto/byoc/create-custom-cloud.rst @@ -1,49 +1,456 @@ -Create a custom cloud in Aiven -============================== +Create an AWS custom cloud in Aiven +=================================== -A :doc:`custom cloud ` is your own cloud infrastructure integrated with your Aiven organization. Using a custom cloud in Aiven may be the optimal solution if you have specific business needs or project requirements, such as a strict regulatory compliance. +Create a :doc:`custom cloud ` in your Aiven organization to better address your specific business needs or project requirements. -.. important:: +.. note:: + + * Creating and using custom clouds in your Aiven organization requires enabling :doc:`the bring your own cloud (BYOC) feature `. Check the availability of the feature in :ref:`Who is eligible for BYOC `. To activate BYOC in your Aiven organization, follow the steps in :doc:`Enable bring your own cloud (BYOC) with Aiven `. + + * Enabling :doc:`the BYOC feature ` or creating custom clouds in your Aiven environment does not affect the configuration of your existing organizations, projects, or services. This only makes the new BYOC capabilities available in your environment. - Creating custom clouds in your Aiven organization requires enabling :doc:`the bring your own cloud (BYOC) feature `, which is a :doc:`limited availability feature `. If you're interested in trying it out, contact the sales team at `sales@Aiven.io `_. +.. important:: -This article provides you with instructions on how to :ref:`add a custom cloud ` to your Aiven organization. + Custom cloud configuration in Aiven is an :doc:`early availability feature `. You cover the costs associated with building and maintaining your custom cloud: payments for your integrated AWS infrastructure and Aiven services within the custom cloud. About creating a custom cloud ----------------------------- -If you have the administrator's role in your Aiven organization, and you enable BYOC, you can create a custom cloud on the Aiven platform. For this purpose, you'll need to configure your custom cloud setup in `Aiven Console `_ and prepare your own Amazon Web Services (AWS) account so that Aiven can access it. +Before creating a custom cloud, make sure you understand all the :ref:`limitations ` and meet all the :ref:`prerequisites `. + +The process of creating a custom cloud in Aiven differs depending on the cloud provider you want to integrate with: + +* If you want to use the AWS cloud provider, create your custom cloud yourself in `Aiven Console `_. + +.. topic:: BYOC self-service in Aiven Console + + You configure your custom cloud setup in `Aiven Console `_ and prepare your own AWS account so that Aiven can access it. In `Aiven Console `_, you follow the **Create custom cloud** workflow to generate a Terraform infrastructure-as-code (IaC) template. Next, you deploy this template in your AWS account to acquire IAM Role ARN (Amazon Resource Name). 
You supply your IAM Role ARN into the **Create custom cloud** wizard, which gives Aiven the permissions to securely access your AWS account, create resources, and manage them onward. Finally, you select projects that can use your new custom clouds for creating services, and you add customer contacts for your custom cloud. -In `Aiven Console `_, you'll use the **Create custom cloud** workflow to generate a Terraform infrastructure-as-code (IaC) template. Next, you'll deploy this template in your AWS account to acquire Role ARN (Amazon Resource Name). You'll supply your Role ARN into the **Create custom cloud** workflow, which will give Aiven the permissions to securely access your AWS account, create resources, and manage them onward. Finally, you'll assign projects and add customer contacts for your custom cloud. +* If you want to use the GCP or Azure cloud providers, request the Aiven team to create the cloud. + +.. _byoc-limitations: Limitations -''''''''''' +----------- -* Administrator's role is required for creating custom clouds. -* :doc:`BYOC limited availability version ` supports the AWS cloud provider only. +* You need at least the Priority tier of Aiven support services to be eligible for activating BYOC. +* If you want to build your custom cloud with a cloud provider other than AWS, you need to request it as detailed in :doc:`Enable bring your own cloud (BYOC) with Aiven ` and follow up with the Aiven team. * BYOC is supported with the :ref:`standard deployment ` model only. +* Only organization's administrators can create custom clouds. + +.. _byoc-prerequisites: Prerequisites ------------- -* Administrator's role for your Aiven organization -* AWS account -* BYOC feature enabled for your Aiven organization by the sales team (`sales@Aiven.io `_) -* Access to `Aiven Console `_ -* Terraform installed +* You have :doc:`enabled the BYOC feature `. +* You have an active account with your cloud provider. +* You have access to `Aiven Console `_ (:ref:`to integrate with AWS `). +* You have administrator's role in your Aiven organization (:ref:`to integrate with AWS `). +* You have Terraform installed (:ref:`to integrate with AWS `). +* You have AWS credentials set up on your machine so that your user or role has required Terraform permissions (:ref:`to integrate with AWS `) as follows: + +.. dropdown:: Show permissions required for creating resources for bastion and workload networks + + .. 
code-block:: bash + + { + "Statement": [ + { + "Action": [ + "iam:AttachRolePolicy", + "iam:CreateRole", + "iam:DeleteRole", + "iam:DeleteRolePolicy", + "iam:GetRole", + "iam:GetRolePolicy", + "iam:ListAttachedRolePolicies", + "iam:ListInstanceProfilesForRole", + "iam:ListRolePolicies", + "iam:PutRolePolicy", + "iam:UpdateAssumeRolePolicy" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/cce-*-iam-role" + }, + { + "Action": [ + "ec2:DescribeAddresses", + "ec2:DescribeAddressesAttribute", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInternetGateways", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSecurityGroupRules", + "ec2:DescribeStaleSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DescribeVpcAttribute", + "ec2:DescribeTags" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "Describe" + }, + { + "Action": [ + "ec2:CreateTags" + ], + "Condition": { + "ForAllValues:StringEquals": { + "aws:TagKeys": [ + "Name", + "aiven_custom_cloud_environment_id", + "aiven_security_group", + "aiven_subnet" + ] + }, + "StringEquals": { + "ec2:CreateAction": [ + "AllocateAddress", + "CreateInternetGateway", + "CreateNatGateway", + "CreateRoute", + "CreateRouteTable", + "CreateSecurityGroup", + "CreateSubnet", + "CreateVpc" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:elastic-ip/*", + "arn:aws:ec2:*:*:internet-gateway/*", + "arn:aws:ec2:*:*:natgateway/*", + "arn:aws:ec2:*:*:route-table/*", + "arn:aws:ec2:*:*:security-group/*", + "arn:aws:ec2:*:*:security-group-rule/*", + "arn:aws:ec2:*:*:subnet/*", + "arn:aws:ec2:*:*:vpc/*" + ], + "Sid": "CreateTag" + }, + { + "Action": [ + "ec2:DeleteTags" + ], + "Condition": { + "ForAllValues:StringEquals": { + "aws:TagKeys": [ + "Name", + "aiven_custom_cloud_environment_id", + "aiven_security_group", + "aiven_subnet" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:elastic-ip/*", + "arn:aws:ec2:*:*:internet-gateway/*", + "arn:aws:ec2:*:*:natgateway/*", + "arn:aws:ec2:*:*:route-table/*", + "arn:aws:ec2:*:*:security-group/*", + "arn:aws:ec2:*:*:security-group-rule/*", + "arn:aws:ec2:*:*:subnet/*", + "arn:aws:ec2:*:*:vpc/*" + ], + "Sid": "DeleteTag" + }, + { + "Action": [ + "ec2:AllocateAddress", + "ec2:CreateInternetGateway", + "ec2:CreateVpc" + ], + "Condition": { + "StringLike": { + "aws:RequestTag/Name": "cce-*" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:elastic-ip/*", + "arn:aws:ec2:*:*:internet-gateway/*", + "arn:aws:ec2:*:*:vpc/*" + ], + "Sid": "Create" + }, + { + "Action": [ + "ec2:CreateNatGateway" + ], + "Condition": { + "StringNotLike": { + "ec2:ResourceTag/Name": "cce-*" + } + }, + "Effect": "Deny", + "Resource": [ + "arn:aws:ec2:*:*:elastic-ip/*", + "arn:aws:ec2:*:*:subnet/*" + ], + "Sid": "CreateNGWAllowCCESubnetOnly" + }, + { + "Action": [ + "ec2:CreateNatGateway" + ], + "Condition": { + "StringNotLike": { + "aws:RequestTag/Name": "cce-*" + } + }, + "Effect": "Deny", + "Resource": [ + "arn:aws:ec2:*:*:natgateway/*" + ], + "Sid": "CreateNGWAllowCCEOnly" + }, + { + "Action": [ + "ec2:CreateNatGateway" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:elastic-ip/*", + "arn:aws:ec2:*:*:natgateway/*", + "arn:aws:ec2:*:*:subnet/*" + ], + "Sid": "CreateNGW" + }, + { + "Action": [ + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSubnet" + ], + "Condition": { + "StringNotLike": { + "ec2:ResourceTag/Name": "cce-*" + } + }, + 
"Effect": "Deny", + "Resource": [ + "arn:aws:ec2:*:*:vpc/*" + ], + "Sid": "CreateSubAllowCCEVPCOnly" + }, + { + "Action": [ + "ec2:CreateRouteTable" + ], + "Condition": { + "StringNotLike": { + "aws:RequestTag/Name": "cce-*" + } + }, + "Effect": "Deny", + "Resource": [ + "arn:aws:ec2:*:*:route-table/*" + ], + "Sid": "CreateRTAllowCCEOnly" + }, + { + "Action": [ + "ec2:CreateRouteTable" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:route-table/*", + "arn:aws:ec2:*:*:vpc/*" + ], + "Sid": "CreateRT" + }, + { + "Action": [ + "ec2:CreateSecurityGroup" + ], + "Condition": { + "StringNotLike": { + "aws:RequestTag/Name": "cce-*" + } + }, + "Effect": "Deny", + "Resource": [ + "arn:aws:ec2:*:*:security-group/*" + ], + "Sid": "CreateSGsAllowCCEOnly" + }, + { + "Action": [ + "ec2:CreateSecurityGroup" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:security-group/*", + "arn:aws:ec2:*:*:vpc/*" + ], + "Sid": "CreateSG" + }, + { + "Action": [ + "ec2:CreateSubnet" + ], + "Condition": { + "StringNotLike": { + "aws:RequestTag/Name": "cce-*" + } + }, + "Effect": "Deny", + "Resource": [ + "arn:aws:ec2:*:*:subnet/*" + ], + "Sid": "CreateSubAllowCCEOnly" + }, + { + "Action": [ + "ec2:CreateSubnet" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:subnet/*", + "arn:aws:ec2:*:*:vpc/*" + ], + "Sid": "CreateSubnets" + }, + { + "Action": [ + "ec2:AssociateAddress", + "ec2:AssociateRouteTable", + "ec2:AssociateSubnetCidrBlock", + "ec2:AssociateVpcCidrBlock", + "ec2:AssignPrivateNatGatewayAddress", + "ec2:AttachInternetGateway", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:ModifySecurityGroupRules", + "ec2:ModifySubnetAttribute", + "ec2:ModifyVpcAttribute", + "ec2:ReplaceRoute", + "ec2:ReplaceRouteTableAssociation", + "ec2:UpdateSecurityGroupRuleDescriptionsEgress", + "ec2:UpdateSecurityGroupRuleDescriptionsIngress" + ], + "Condition": { + "StringLike": { + "ec2:ResourceTag/Name": "cce-*" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:elastic-ip/*", + "arn:aws:ec2:*:*:internet-gateway/*", + "arn:aws:ec2:*:*:natgateway/*", + "arn:aws:ec2:*:*:route-table/*", + "arn:aws:ec2:*:*:security-group/*", + "arn:aws:ec2:*:*:security-group-rule/*", + "arn:aws:ec2:*:*:subnet/*", + "arn:aws:ec2:*:*:vpc/*" + ], + "Sid": "Modify" + }, + { + "Action": [ + "ec2:DisassociateAddress" + ], + "Condition": { + "StringNotLike": { + "ec2:ResourceTag/Name": "cce-*" + } + }, + "Effect": "Deny", + "Resource": [ + "arn:aws:ec2:*:*:elastic-ip/*" + ], + "Sid": "DisassociateEIPAllowCCEOnly" + }, + { + "Action": [ + "ec2:DisassociateAddress" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:*/*" + ], + "Sid": "DisassociateEIP" + }, + { + "Action": [ + "ec2:DetachInternetGateway", + "ec2:DisassociateNatGatewayAddress", + "ec2:DisassociateRouteTable", + "ec2:DisassociateSubnetCidrBlock", + "ec2:DisassociateVpcCidrBlock", + "ec2:DeleteInternetGateway", + "ec2:DeleteNatGateway", + "ec2:DeleteNetworkInterface", + "ec2:DeleteRoute", + "ec2:DeleteRouteTable", + "ec2:DeleteSecurityGroup", + "ec2:DeleteSubnet", + "ec2:DeleteVpc", + "ec2:ReleaseAddress", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "ec2:UnassignPrivateNatGatewayAddress" + ], + "Condition": { + "StringLike": { + "ec2:ResourceTag/Name": "cce-*" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:elastic-ip/*", + "arn:aws:ec2:*:*:internet-gateway/*", + "arn:aws:ec2:*:*:natgateway/*", + "arn:aws:ec2:*:*:network-interface/*", + 
"arn:aws:ec2:*:*:route-table/*", + "arn:aws:ec2:*:*:security-group/*", + "arn:aws:ec2:*:*:security-group-rule/*", + "arn:aws:ec2:*:*:subnet/*", + "arn:aws:ec2:*:*:vpc/*" + ], + "Sid": "Delete" + } + ], + "Version": "2012-10-17" + } .. _create-cloud: Create a custom cloud --------------------- -Navigate to BYOC in Aiven Console -''''''''''''''''''''''''''''''''' +.. _create-cloud-non-aws: + +Create a custom cloud with GCP or Azure +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you want to use the GCP or Azure cloud providers, you'll have your custom cloud created by the Aiven team (not via `Aiven Console `_). Therefore, after :doc:`enabling the BYOC feature ` in `Aiven Console `_, there are no further actions required from you to create your custom cloud. We'll build your custom cloud for you according to the specifications you provided while :doc:`enabling BYOC ` in `Aiven Console `_. We might reach out to you for more details if needed and follow up with you to keep you informed on the progress. + +.. _create-cloud-aws: + +Create a custom cloud with AWS +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Create your infrastructure template in `Aiven Console `_, deploy the template in your AWS account to generate Role ARN, and get back to `Aiven Console `_ with your Role ARN to proceed with your custom cloud configuration. Finalize the setup by selecting in which projects you want to use your custom cloud and assigning a contact person for your custom cloud. + +Launch the BYOC setup in Aiven Console +'''''''''''''''''''''''''''''''''''''' 1. Log in to `Aiven Console `_ as an administrator. -2. From the top navigation bar, select **Admin**. -3. From the left sidebar, select **Bring you own cloud**. -4. In the **Bring you own cloud** view, select **Create custom cloud**. +2. Select the organization you want to use from the dropdown menu in the top right corner. +3. From the top navigation bar, select **Admin**. +4. From the left sidebar, select **Bring your own cloud**. +5. In the **Bring your own cloud** view, select **Create custom cloud**. .. _generate-infra-template: @@ -52,25 +459,28 @@ Generate an infrastructure template In this step, an IaC template is generated in the Terraform format. In :ref:`the next step `, you'll deploy this template in your AWS account to acquire Role ARN (Amazon Resource Name), which Aiven needs for accessing your AWS account. -In the **Create custom cloud** workflow, proceed as follows: +In the **Create custom cloud** wizard, proceed as follows: 1. Specify the following: * Custom cloud name - * Cloud provider + * Cloud provider (AWS only) + * Region + * CIDR - .. important:: + The **CIDR** block defines the IP address range of the VPC that Aiven creates in your AWS account. Any Aiven service created in the custom cloud will be placed in the VPC and will get an IP address within this address range. - **Amazon Web Services (AWS)** is the only option supported currently. + In the **CIDR** field, specify an IP address range for the BYOC VPC using a CIDR block notation, for example: ``10.0.0.0/16``, ``172.31.0.0/16``, or ``192.168.0.0/20``. + + Make sure that an IP address range you use meets the following requirements: - * Region - * CIDR + * IP address range is within the private IP address ranges allowed in `RFC 1918 `_. + * CIDR block size is between ``/16`` (65536 IP addresses) and ``/24`` (256 IP addresses). + * CIDR block is large enough to host the desired number of services after splitting it into per-availability-zone subnets. 
- Aiven needs CIDR for the `CIDR block of the VPC `_ that will be created in your AWS account. + For example, the smallest ``/24`` CIDR block might be enough for a few services but can pose challenges during node replacements or maintenance upgrades if running low on available free IP addresses. - * Specify inbound rules with the CIDR block notation, for example: 200.1.2.3/32 (allowing 200.1.2.3 as a single address), 0.0.0.0/0 (allowing traffic from anywhere), or 100.1.0.0/16 (allowing traffic from 100.1..). - * To create VPC peerings with that VPC, choose a CIDR block that doesn't overlap with CIDR blocks of peer VPCs. - * Keep in mind that CIDR block needs be large enough so that, after splitting it into per-region subnets, each subnet has enough addresses to fit required services. + * CIDR block of your BYOC VPC doesn't overlap with the CIDR blocks of VPCs you plan to peer your BYOC VPC with. You cannot change the BYOC VPC CIDR block after your custom cloud is created. 2. Select **Next**. @@ -85,49 +495,110 @@ Deploy the template to acquire ``Role ARN`` Role ARN is an `identifier of the role `_ created when running the infrastructure template in your AWS account. Aiven uses Role ARN to `assume the role `_ and run operations such as creating VMs for service nodes in your BYOC account. -Use the Terraform template generated in step :ref:`Generate an infrastructure template ` to create your Role ARN by deploying the template in your AWS account. +Use the Terraform template generated in step :ref:`Generate an infrastructure template ` to create your Role ARN by deploying the template in your AWS account. Continue working in the **Create custom cloud** wizard by taking the following steps: -.. important:: +1. Copy or download the template and the variables file from the **Create custom cloud** wizard. - When running ``terraform plan`` and ``terraform apply``, make sure you add ``-var-file=FILE_NAME.vars`` as an option. +2. Optionally, modify the template as needed. -As soon as you acquire Role ARN, enter it into the **Role ARN** field in the **Create custom cloud** workflow, and select **Next** to proceed. + .. note:: + + To connect to a custom-cloud service from different security groups (other than the one dedicated for the custom cloud) or from other IP address ranges, add specific ingress rules before you apply a Terraform infrastructure template in your AWS account in the process of creating your custom cloud resources. + + Before adding ingress rules, see the examples provided in the Terraform template you generated and downloaded from `Aiven Console `_. -.. note:: - - You can park your cloud setup here, save your current configuration as a draft, and resume creating your cloud later. +3. In your AWS account, run the template with the variables using Terraform (see the sketch after this list). + + .. important:: + + When running ``terraform plan`` and ``terraform apply``, make sure you add ``-var-file=FILE_NAME.vars`` as an option. + +4. Find the role identifier (Role ARN) in the Terraform output after running the template. +5. Enter Role ARN into the **Role ARN** field in the **Create custom cloud** wizard. +6. Select **Next** to proceed, or park your cloud setup here and save your current configuration as a draft by selecting **Save draft**. You can resume creating your cloud later.
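For step 3, the Terraform run is typically the standard init/plan/apply sequence. A minimal sketch, in which the working directory and the ``byoc.vars`` file name are placeholders for whatever you downloaded from the wizard:

.. code-block:: bash

   # Initialize the directory containing the downloaded template
   terraform init

   # Review the planned changes, passing the downloaded variables file
   terraform plan -var-file=byoc.vars

   # Create the resources in your AWS account
   terraform apply -var-file=byoc.vars

   # Print the outputs and copy the IAM Role ARN into the wizard
   terraform output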
-Assign projects and customer contacts -''''''''''''''''''''''''''''''''''''' +Set up your custom cloud's availability +''''''''''''''''''''''''''''''''''''''' -Continue working in the **Create custom cloud** workflow by taking the following steps: +Select in what projects you'll be able to use your new custom cloud to create services. You can make your cloud available for all the projects in your organization, selected organizational units, or specific projects only. -1. From the **Assign projects** dropdown menu, select projects for which you want your custom cloud to be available. -2. To add customer contacts, select their roles using the **Role** dropdown menu, and provide email addresses in the **Email** field. Using **+**, add as many customer contacts as needed for your custom cloud. +Continue working in the **Create custom cloud** wizard by taking the following steps: - .. note:: +#. In the **Custom cloud's availability in your organization** section, select either: - The customer contact information is used by the Aiven support team to contact you in case any technical issue with the custom cloud needs fixing. + * **By default for all projects** to make your custom cloud available in all existing and future projects in the organization -3. Select **Finish**. + or + + * **By selection** to pick specific projects or organizational units where you want your custom cloud to be available. + +#. If you choose the **By selection** option, dropdown menus **Assign organizational units** and **Assign projects** show up. Use them to select organizational units and/or projects in which you want to be able to use your custom cloud. + +.. note:: + + By selecting an organizational unit, you make your custom cloud available from all the projects in this unit. + +Add customer contacts +''''''''''''''''''''' + +Select at least one person whom Aiven can contact in case any technical issue with the custom cloud needs fixing. Continue working in the **Create custom cloud** wizard by taking the following steps: + +1. In the **Customer contacts** section, select a contact person's role using the **Job title** dropdown menu, and provide their email address in the **Email** field. +2. Use **+ Add another contact** to add as many customer contacts as needed for your custom cloud. +3. Select **Create**. .. topic:: Result - The custom cloud process has been initiated for you, which is communicated in the the **Create custom cloud** workflow. + The custom cloud creation process has been initiated for you, which is communicated in the **Create custom cloud** wizard as **Creating your custom cloud**. Complete the cloud setup '''''''''''''''''''''''' -You're all set. Select **Done** to close the **Create custom cloud** workflow. +You're all set. Select **Close** to close the **Create custom cloud** wizard. .. topic:: Result - The deployment of your new custom cloud might take a few minutes. As soon as it's over, and your custom cloud is ready to use, you'll be able to see it on the list of your custom clouds in the **Bring you own cloud** view. + The deployment of your new custom cloud might take a few minutes. As soon as it's over, and your custom cloud is ready to use, you'll be able to see it on the list of your custom clouds in the **Bring your own cloud** view. + +.. note:: + + Your new custom cloud is ready to use only after its status changes to **Active**.
+ +Check your cloud's status +''''''''''''''''''''''''' + +You can check the status of your custom cloud by taking the following steps: -Related reading ---------------- +1. Log in to `Aiven Console `_ as an administrator. +2. Select the organization you want to use from the dropdown menu in the top right corner. +3. From the top navigation bar, select **Admin**. +4. From the left sidebar, select **Bring your own cloud**. +5. In the **Bring your own cloud** view, identify your new cloud on the list of available clouds and check its status in the **Status** column. + +.. topic:: Result + + If your custom cloud's status is **Active**, its deployment has been completed. Your custom cloud is ready to use and you can see it on the list of your custom clouds in the **Bring your own cloud** view. Now you can create services using your new custom cloud. + +Next steps +---------- + +Destroy the Terraform resources +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +As soon as your new custom cloud gets the **Active** status, remove the Terraform resources you created in your AWS account while creating the cloud. See the guidelines on how to use the ``destroy`` command in `Command: destroy `_. + +Create a service using the new cloud +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To create a service in `Aiven Console `_ using your new custom cloud, follow the guidelines in :doc:`Create a new service `. + +When you get to the **Select service region** step while setting up your service in `Aiven Console `_, you'll be able to select **Custom clouds** from among available regions. + +Related pages +------------- -* :doc:`Bring your own cloud ` +* :doc:`About bring your own cloud (BYOC) ` +* :doc:`Enable bring your own cloud (BYOC) ` * :doc:`Assign a project to your custom cloud ` * :doc:`Add customer's contact information for your custom cloud ` * :doc:`Rename your custom cloud ` diff --git a/docs/platform/howto/byoc/delete-custom-cloud.rst b/docs/platform/howto/byoc/delete-custom-cloud.rst new file mode 100644 index 0000000000..c53cd4610c --- /dev/null +++ b/docs/platform/howto/byoc/delete-custom-cloud.rst @@ -0,0 +1,70 @@ +Delete your AWS custom cloud in Aiven +===================================== + +Delete a :doc:`custom cloud ` so that it's no longer available in your Aiven organizations, units, or projects. + +.. important:: + + Custom cloud configuration in Aiven is an :doc:`early availability feature `. You cover the costs associated with building and maintaining your custom cloud: payments for your integrated AWS infrastructure and Aiven services within the custom cloud. + +About deleting custom clouds +---------------------------- + +After deleting a custom cloud, the data co-hosted in this cloud will no longer be available from the Aiven platform. Before deleting your custom cloud, make sure there are no active services using this cloud. + +Impact on your Aiven resources +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The deletion mostly impacts resources on the Aiven side, such as cloud configuration files. + +Impact on your AWS account resources +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A bastion service and the corresponding EC2 instance are deleted as a consequence of your custom cloud's removal. The resources created when applying the Terraform template to create the custom cloud are not removed automatically after deleting your custom cloud. Unless you've removed them earlier, you're advised to do that when deleting your cloud, for example as sketched below.
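A minimal cleanup sketch, assuming you still have the directory with the template and the ``FILE_NAME.vars`` variables file you applied when creating the cloud:

.. code-block:: bash

   # Sketch only: run in the directory where you originally ran terraform apply.
   terraform destroy -var-file=FILE_NAME.vars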
+ +Prerequisites +------------- + +* Administrator's role for your Aiven organization +* At least one :doc:`custom cloud created ` in your Aiven organization +* No running services that uses a custom cloud to be deleted +* Access to `Aiven Console `_ + +Delete your cloud +----------------- + +1. Log in to `Aiven Console `_ as an administrator. +2. Select the organization you want to use from the dropdown menu in the top right corner. +3. From the top navigation bar, select **Admin**. +4. From the left sidebar, select **Bring your own cloud**. +5. In the **Bring your own cloud** view, select one of the clouds available on the list. +6. In the selected cloud's page, use the ellipsis (**...**) menu in the top right corner to select **Delete**. +7. Make sure you understand the impact and, if so, confirm that you want to delete the cloud by selecting **Delete** in the **Warning** window. + +.. topic:: Result + + Your custom cloud has been deleted. + +.. important:: + + Remember to remove the resources created in your AWS account when applying the Terraform template to create the custom cloud. They are not removed automatically after deleting the cloud. Unless you've removed them earlier, use the ``terraform destroy`` command to delete them. See the guidelines on how to use the ``destroy`` command in `Command: destroy `_. + +Check it out +------------ + +You can verify if you deleted your cloud successfully by taking the following steps: + +1. Log in to `Aiven Console `_ as an administrator. +2. Select the organization you want to use from the dropdown menu in the top right corner. +3. From the top navigation bar, select **Admin**. +4. From the left sidebar, select **Bring your own cloud**. +5. In the **Bring your own cloud** view, see the list of the available clouds to make sure the cloud you intended to delete is actually gone. + +Related pages +------------- + +* :doc:`Bring your own cloud ` +* :doc:`Enable bring your own cloud (BYOC) ` +* :doc:`Create a custom cloud in Aiven ` +* :doc:`Assign a project to your custom cloud ` +* :doc:`Add customer's contact information for your custom cloud ` diff --git a/docs/platform/howto/byoc/enable-byoc.rst b/docs/platform/howto/byoc/enable-byoc.rst new file mode 100644 index 0000000000..9b76ebd706 --- /dev/null +++ b/docs/platform/howto/byoc/enable-byoc.rst @@ -0,0 +1,70 @@ +Enable bring your own cloud (BYOC) +================================== + +Enabling :doc:`the bring your own cloud (BYOC) feature ` allows you to :doc:`create custom clouds ` in your Aiven organization. For more information on BYOC and custom clouds, check +:doc:`Bring your own cloud (BYOC) `. + +.. note:: + + Enabling :doc:`the BYOC feature ` or creating custom clouds in your Aiven environment does not affect the configuration of your existing Aiven organizations, projects, or services. This only allows you to run Aiven services in your cloud provider account. + +About enabling BYOC +------------------- + +To be able to create custom clouds on the Aiven platform, first you need to enable the BYOC feature. `Aiven Console `_ offers a quick and easy way to set up a short call with the Aiven sales team to identify your use cases and confirm the requirements. In the call, we make sure BYOC can address them, and we check your environment eligibility for the feature. + +.. 
important:: + + Before enabling BYOC, check the availability of the feature in :ref:`Who is eligible for BYOC `, make sure you understand all the :ref:`limitations `, and meet all the :ref:`prerequisites `. + +.. _byoc-enable-limitations: + +Limitations +----------- + +* You need at least the Priority tier of Aiven support services to be eligible for activating BYOC. +* BYOC is supported with the :ref:`standard deployment ` model only. +* Only organization administrators can request enabling BYOC. + +.. _byoc-enable-prerequisites: + +Prerequisites +------------- + +* Administrator's role for your Aiven organization +* Access to `Aiven Console `_ +* Active account with your cloud provider + +Enable BYOC +----------- + +1. Log in to `Aiven Console `_ as an administrator. +2. Select the organization you want to use from the dropdown menu in the top right corner. +3. From the top navigation bar, select **Admin**. +4. From the left sidebar, select **Bring your own cloud**. +5. In the **Bring your own cloud** view, select **Contact us**. +6. In the **Contact us** window, enter your email address and country. Select the cloud provider you want to use, add any other information you think might be relevant, and select **Confirm**. + + The scheduling assistant shows up so that you can schedule a short call with the Aiven sales team to proceed with your BYOC enablement request. + +7. Using the scheduling assistant, select a date and time when you want to talk to our sales team to share your requirements and make sure BYOC suits your needs. Confirm the selected time, make sure you add the call to your calendar, and close the scheduling assistant. +8. Join the scheduled call with our sales team to follow up with them on enabling BYOC in your environment. + + If the call reveals that BYOC addresses your needs and your environment is eligible for BYOC, the feature will be enabled for your Aiven organization. + +Next steps +---------- + +With BYOC activated in your Aiven organization, you can use custom clouds: + +* :ref:`Create them yourself using Aiven Console if the cloud provider you selected was AWS ` +* :ref:`Request the Aiven team to create one if the cloud provider you selected was GCP or Azure `. + +Related pages +------------- + +* :doc:`Create a custom cloud ` +* :doc:`About bring your own cloud (BYOC) ` +* :doc:`Assign a project to your custom cloud ` +* :doc:`Add customer's contact information for your custom cloud ` +* :doc:`Rename your custom cloud ` diff --git a/docs/platform/howto/byoc/rename-custom-cloud.rst b/docs/platform/howto/byoc/rename-custom-cloud.rst index bad5014cbe..b39cb8cd80 100644 --- a/docs/platform/howto/byoc/rename-custom-cloud.rst +++ b/docs/platform/howto/byoc/rename-custom-cloud.rst @@ -1,11 +1,11 @@ -Rename your custom cloud -======================== +Rename your AWS custom cloud in Aiven +===================================== -.. important:: +Change the name of your :doc:`custom cloud `. - Creating custom clouds in your Aiven organization requires enabling :doc:`the bring your own cloud (BYOC) feature `, which is a :doc:`limited availability feature `. If you're interested in trying it out, contact the sales team at `sales@Aiven.io `_. +.. important:: -This article details how to change the name of your custom cloud in `Aiven Console `_. + Custom cloud configuration in Aiven is an :doc:`early availability feature `.
You cover the costs associated with building and maintaining your custom cloud: payments for your integrated AWS infrastructure and Aiven services within the custom cloud. About renaming custom clouds ---------------------------- @@ -23,10 +23,12 @@ Rename your cloud ----------------- 1. Log in to `Aiven Console `_ as an administrator. -2. From the left sidebar, select **Bring your own cloud**. -3. In the **Bring you own cloud** view, select one of the clouds available on the list. -4. In the selected cloud's page, use the ellipsis (**...**) menu in the top right corner to select **Rename**. -5. In the **Rename custom cloud** window, enter a new name into the **Custom cloud name** field and select **Rename**. +2. Select the organization you want to use from the dropdown menu in the top right corner. +3. From the top navigation bar, select **Admin**. +4. From the left sidebar, select **Bring your own cloud**. +5. In the **Bring your own cloud** view, select one of the clouds available on the list. +6. In the selected cloud's page, use the ellipsis (**...**) menu in the top right corner to select **Rename**. +7. In the **Rename custom cloud** window, enter a new name into the **Custom cloud name** field and select **Rename**. .. topic:: Result @@ -38,11 +40,13 @@ Check it out You can preview the updated name of your cloud by taking the following steps: 1. Log in to `Aiven Console `_ as an administrator. -2. From the left sidebar, select **Bring your own cloud**. -3. In the **Bring you own cloud** view, see the list of the available clouds and identify the cloud with the name you updated. +2. Select the organization you want to use from the dropdown menu in the top right corner. +3. From the top navigation bar, select **Admin**. +4. From the left sidebar, select **Bring your own cloud**. +5. In the **Bring your own cloud** view, see the list of the available clouds and identify the cloud with the name you updated. -Related reading ---------------- +Related pages +------------- * :doc:`Bring your own cloud ` * :doc:`Create a custom cloud in Aiven ` diff --git a/docs/platform/howto/change-support-tier.rst b/docs/platform/howto/change-support-tier.rst deleted file mode 100644 index 3944bb04f0..0000000000 --- a/docs/platform/howto/change-support-tier.rst +++ /dev/null @@ -1,31 +0,0 @@ -Upgrade your support tier in the Aiven Console -============================================== - -Aiven provides free Basic tier support for all paid services. Beyond this standard support, Aiven also offers `three additional support tiers `_ with faster response times, phone support, and other services. - -You can upgrade your support tier within the Aiven Console. If you have questions or want to downgrade your support tier, contact your account team. - -Upgrade your support tier -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To upgrade your organization's support tier to the Priority, Business, or Enterprise tier in the `Aiven Console `_: - -#. In the organization, click **Support**. - -#. In the **Current support tier** section, click **Upgrade to Enterprise**. - -#. Choose the support tier you want to upgrade to and click **Choose tier**. - -#. Select a **Start date**. - - .. note:: - If you select the current month, you will be charged a percentage of the total service spend for the whole month, starting from the 1st. - -#. Select a **Billing group**. - - .. 
important:: - The support costs for all current and future services in the selected organization and all of its organizational units will be added to the invoice for this billing group. - -#. Click **Upgrade tier**. - -It typically takes 1-2 business days to set up the new support tier. You can view the status of your request on the support page under **Current support tier**. \ No newline at end of file diff --git a/docs/platform/howto/cleanup-powered-off-services.rst b/docs/platform/howto/cleanup-powered-off-services.rst index 5f782dce71..e559319bda 100644 --- a/docs/platform/howto/cleanup-powered-off-services.rst +++ b/docs/platform/howto/cleanup-powered-off-services.rst @@ -8,7 +8,9 @@ The Aiven platform receive regular maintenance updates to keep the services upda Keeping services in powered-off state for a long time lowers the feasibility of a smooth upgrade path, making it harder for Aiven to continue supporting your needs. .. note:: + * It is recommended that you regularly review your services and delete those that are no longer needed. This allows Aiven to focus on supporting the services that you actively use and better utilize platform resources. + * If a service has been powered off for 90 days, you will receive email notifications reminding you that the service has been inactive for a prolonged period. If the service remains powered off for 180 consecutive days, it is subject to automatic deletion. @@ -17,5 +19,5 @@ Delete a powered-off service 1. In `Aiven Console `_, select **Services** in the left navigation bar to display a list of all services. 2. On the **Services** page, use the search bar to locate a specific powered off service or the filter to display a list of services with status **Powered off**. -3. Select the powered off service to access its **Overview** page, and select **Delete service** from the meatballs menu in the top right corner. +3. Select the powered off service to access its **Overview** page, and select **Delete service** from the actions menu in the top right corner. 4. In the **Delete confirmation** window, enter the name of the service to delete and select **Delete** to confirm the action. diff --git a/docs/platform/howto/console-fork-service.rst b/docs/platform/howto/console-fork-service.rst index 37dc20a5f5..2e03da4976 100644 --- a/docs/platform/howto/console-fork-service.rst +++ b/docs/platform/howto/console-fork-service.rst @@ -28,13 +28,17 @@ Fork a service using the Aiven client (CLI) 1. Prepare the command to create a new service, this will contain the new copy of your data store. 2. Add the ``service_to_fork_from`` parameter to specify the service to use as the source. -Change service type accordingly with ``-t``, run the following command to see available options:: + Change service type accordingly with ``-t``, run the following command to see available options: - avn service types + .. code:: + + avn service types -For example, if you want to create a fork of your ``forker`` PostgreSQL® service, and name it ``forked``, the command would be something like:: +For example, if you want to create a fork of your ``forker`` PostgreSQL® service, and name it ``forked``, the command would be something like: - avn service create forked -t pg --plan business-4 -c service_to_fork_from=forker +.. code:: + + avn service create forked -t pg --plan business-4 -c service_to_fork_from=forker .. 
topic:: Result diff --git a/docs/platform/howto/create-service-integration.rst b/docs/platform/howto/create-service-integration.rst index 42bec3e76e..779515e88d 100644 --- a/docs/platform/howto/create-service-integration.rst +++ b/docs/platform/howto/create-service-integration.rst @@ -15,32 +15,40 @@ To get started, you need three services: Create an integration --------------------- -1. In `Aiven Console `_, :doc:`create the new services: ` Aiven for Apache Kafka®, Aiven for PostgreSQL®, and Aiven for Grafana®. You can choose your preferred cloud provider, region, and any plan from **startup** / **business** / **premium**. +1. In the `Aiven Console `_, :doc:`create new services ` including Aiven for Apache Kafka®, Aiven for PostgreSQL®, and Aiven for Grafana®. Choose your preferred cloud provider, region, and a plan from **startup**, **business**, or **premium**. -2. Once all three services are running in `Aiven Console `_, select the Aiven for PostgreSQL service from the **Services** page, and make sure the **Overview** page of your service is open. On the **Overview** page, go to **Service integrations** > **Manage integrations** > **Aiven solutions** > **Monitor Data in Grafana**. In the **Datasource integration** window, make sure the **Existing service** radio button is highlighted, and select the newly created Aiven for Grafana service. Select **Enable**. +2. Once all three services are running in the `Aiven Console `_, select the Aiven for PostgreSQL service from the **Services** page. Ensure you are on the **Overview** page of your service. + + a. Select to **Integrations** from the sidebar. + b. Under **Aiven solutions**, click **Monitor Data in Grafana**. + c. In the **Datasource integration** window, select the **Existing service** radio button and choose the Aiven for Grafana service you created. + d. Click **Enable**. -3. Enable receiving metrics from the Aiven for Apache Kafka service. On the **Overview** page of your Aiven for PostgreSQL service, go to **Service integrations** > **Manage integrations** > **Aiven solutions** > **Receive Metrics**. In the **Metrics integration** window, make sure the **Existing service** radio button is highlighted, and select the newly created Aiven for Apache Kafka service. Select **Enable**. +3. To enable metrics from the Aiven for Apache Kafka service, go to the **Overview** page of your Aiven for PostgreSQL service. + + a. Select **Service integrations** from the sidebar. + b. Under **Aiven solutions**, select **Receive Metrics**. + c. In the **Metrics integration** window, ensure the **Existing service** radio button is selected and choose the Aiven for Apache Kafka service. + d. Click **Enable**. .. note:: - You have now the advanced Aiven for Apache Kafka telemetry data flowing to the Aiven for PostgreSQL service. + This step allows advanced Aiven for Apache Kafka telemetry data to flow into the Aiven for PostgreSQL service. -4. Open the Grafana dashboard to see the Aiven for Apache Kafka metrics data. +4. To view the Aiven for Apache Kafka metrics data in Grafana: - 1. In `Aiven Console `_, select your Aiven for Grafana service from the **Services** view. - 2. In the **Overview** page of your service, navigate to the **Connection information** section. - 3. Use **Service URI** from the **Connection information** section to access the Grafana service in your browser. To log in, use the credentials available in the the **Connection information** section (the ``avnadmin`` user and the password). + a. 
In the `Aiven Console `_, select your Aiven for Grafana service from the **Services** page. + b. In the **Connection information** section on the service **Overview** page, copy the **Service URI** from the **Connection information** to access the Grafana service in your browser. + c. Log in using the credentials provided in the **Connection information** section (the ``avnadmin`` user and the password). .. note:: - If you cannot see a dashboard after logging in, search for a dashboard from the top-left corner in the Grafana console to find dashboard ``Aiven Kafka - - Resources``. - - This dashboard is a predefined view that is automatically maintained by Aiven. + If you don't see a dashboard after logging in, search for ``Aiven Kafka - - Resources`` from the top-left corner in the Grafana console. This is a predefined dashboard automatically maintained by Aiven. .. note:: - It may take a minute to start getting data into to the dashboard view if you just enabled the integrations. The view can be refreshed by reloading in the top-right corner. You can add custom dashboards by either defining them from scratch in Grafana or by saving a copy of the predefined dashboard under a different name that does not start with *Aiven*. + Data may take a minute to appear on the dashboard if you've just enabled the integrations. Refresh the view by reloading the page from the top-right corner. You can create custom dashboards either from scratch in Grafana or by saving a copy of the predefined dashboard under a different name that does not start with *Aiven*. .. warning:: - Any changes that you make to the predefined dashboard are eventually automatically overwritten by the system. + Any modifications to the predefined dashboard will be automatically overwritten by the system in time. diff --git a/docs/platform/howto/disk-autoscaler.rst b/docs/platform/howto/disk-autoscaler.rst new file mode 100644 index 0000000000..20fc55ab23 --- /dev/null +++ b/docs/platform/howto/disk-autoscaler.rst @@ -0,0 +1,300 @@ +Scale your Aiven service disks automatically +============================================ + +.. important:: + + Service disk autoscaler is a :doc:`limited availability feature `. If you're interested in trying it out, contact the sales team at sales@Aiven.io. + +Discover the service disk autoscaler and its capabilities. Find out how it works and how to use it with your Aiven services. + +.. topic:: Pricing + + Costs of using disk autoscaler depend on your service type and plan. You're only charged for additional storage space actually provisioned for your service. Costs of using disk autoscaler correspond to costs of using dynamic disk sizing (DDS), which you can check in `Aiven Plans and Pricing `_. + +Why use disk autoscaling +------------------------ + +Service disk autoscaler increases disk storage capacity automatically when the disk is running out of space. + +.. note:: + + Currently, service disk autoscaler doesn't support scaling down. + +* Disk autoscaling allows you to improve the cost-efficiency of operating your Aiven services: You can start with a regular-sized disk and only have it scaled up when needed with no risk of running out of disk space at any point. + +* Disk autoscaling helps improve service resiliency eliminating the risk of a service becoming non-functional as a consequence of running out of disk space. Use disk autoscaling to make sure your service remains operational in case of unexpected high demand for disk space. 
+ +How it works +------------ + +There are a few steps illustrating how disk autoscaler works: + +1. You create a disk autoscaler integration endpoint in your Aiven project setting the maximum additional storage at the same time. +2. You enable a disk autoscaler integration for your service using the new disk autoscaler integration endpoint. +3. From that point onward, the evaluation of disk space availability for your service is done every 30 seconds. +4. When disk storage consumption reaches the threshold for a specific service, disk autoscaler increases available storage space by 10% every time taking the currently-used disk space as a baseline. + +.. topic:: AUTOSCALE THRESHOLDS PER SERVICE TYPE + + The threshold at which disk autoscaling is triggered is a percentage of the available disk storage capacity and depends on a service type: + + * Aiven for Apache Cassandra®: 35% of the available disk storage capacity + * Aiven for OpenSearch®: 75% of the available disk storage capacity + * All the other Aiven service types: 85% of the available disk storage capacity + +Prerequisites +------------- + +* Aiven organization, project, and service up and running +* :doc:`Dynamic disk sizing (DDS) ` supported for the service plan and the cloud hosting the service +* Role of the operator for your Aiven organization, project, and service +* Depending on what interface you'd like to use for interacting with disk autoscaler: + + * Access to `Aiven Console `_ + * `Aiven API `_ + * :doc:`Aiven CLI client ` + +Enable disk autoscaler +---------------------- + +To enable disk autoscaling on your Aiven service, you need to create an autoscaler integration endpoint and enable autoscaler integration with your service using the new endpoint. You can set up disk autoscaling in `Aiven Console `_, using Aiven API, or Aiven CLI client. + +Enable in Aiven Console +~~~~~~~~~~~~~~~~~~~~~~~ + +Create an autoscaler endpoint +''''''''''''''''''''''''''''' + +1. Log in to `Aiven Console `_ and navigate to a desired organization and project. +2. On the **Services** page of your project, select **Integration endpoints** from the sidebar. +3. On the **Integration endpoints** page, select **Disk autoscaler** > **Add new endpoint**. +4. In the **Create new autoscaler endpoint** window, enter an endpoint name, specify a maximum additional disk storage that you want to allow for disk autoscaling purposes, and select **Create**. + +Enable on a service +''''''''''''''''''' + +1. Log in to `Aiven Console `_ and navigate to a desired organization, project, and service. +2. On the **Overview** page of your service, select **Integrations** from the sidebar. +3. On the **Integrations** page, navigate to **External integrations** and select **Disk autoscaler**. +4. In the **Autoscaler integration** window, select the newly-created autoscaler integration endpoint from the dropdown menu and select **Enable**. + +Enable with Aiven API +~~~~~~~~~~~~~~~~~~~~~ + +To enable disk autoscaler on your service via `Aiven API `_, call the `ServiceIntegrationEndpointCreate `_ endpoint on your project and, next, the `ServiceIntegrationCreate `_ endpoint to create an autoscaler integration on your service. + +1. Call the `ServiceIntegrationEndpointCreate `_ endpoint on your project passing the following in the request body: + + * Endpoint name (path and request body parameters) + * ``endpoint_type`` (request body): ``disk_storage`` + * ``max_additional_storage`` (request body > ``user_config`` object) + + .. 
code-block:: bash + + curl --request POST \ + --url https://api.aiven.io/v1/project/{project_name}/integration_endpoint \ + --header 'Authorization: Bearer REPLACE_WITH_YOUR_BEARER_TOKEN' \ + --header 'content-type: application/json' \ + --data \ + '{ + "endpoint_name": "REPLACE_WITH_ENDPOINT_NAME", + "endpoint_type": "disk_storage", + "user_config": { + "autoscaler": { + "max_additional_storage": "REPLACE_WITH_DESIRED_VALUE_IN_GB" + } + } + }' + +2. Call the `ServiceIntegrationCreate `_ endpoint on your service passing the following in the request body: + + * ``dest_endpoint_id``: ID of your new autoscaler integration endpoint + * ``integration_type``: ``autoscaler`` + * ``source_project``: the name of a project your autoscaler endpoint is created for + * ``source_service``: the name of a service for which you want to enable autoscaler + + .. code-block:: bash + + curl --request POST \ + --url https://api.aiven.io/v1/project/{project_name}/integration \ + --header 'Authorization: Bearer REPLACE_WITH_YOUR_BEARER_TOKEN' \ + --header 'content-type: application/json' \ + --data \ + '{ + "dest_endpoint_id": "REPLACE_WITH_YOUR_NEW_AUTOSCALER_ENDPOINT_ID", + "integration_type": "autoscaler", + "source_project": "REPLACE_WITH_PROJECT_NAME", + "source_service": "REPLACE_WITH_SERVICE_NAME" + }' + +Enable with Aiven CLI +~~~~~~~~~~~~~~~~~~~~~ + +You can enable disk autoscaler for your service with the :doc:`Aiven CLI client ` by running commands to create the following: +* Autoscaler integration endpoint on your project (:ref:`avn service integration-endpoint-create `) +* Autoscaler integration on your service using the new autoscaler integration endpoint (:ref:`avn service integration-create `) + +1. Run the following command to create an autoscaler integration endpoint on your project: + + .. code-block:: bash + + avn service integration-endpoint-create \ + --project YOUR_PROJECT_NAME \ + --endpoint-name DESIRED_ENDPOINT_NAME \ + --endpoint-type disk_storage \ + --user-config-json '{"max_additional_storage":"REPLACE_WITH_DESIRED_VALUE_IN_GB"}' + +2. Run the :ref:`avn service integration-endpoint-list ` command to retrieve the identifier of the new endpoint: + + .. code-block:: shell + + avn service integration-endpoint-list --project YOUR_PROJECT_NAME + +3. Run the following command to create an autoscaler integration on your service using the new autoscaler integration endpoint: + + .. code-block:: bash + + avn service integration-create \ + --dest-service YOUR_SERVICE_NAME \ + --integration-type autoscaler \ + --source-endpoint-id ID_OF_AUTOSCALER_INTEGRATION_ENDPOINT + +Configure disk autoscaler +------------------------- + +After enabling disk autoscaler, you can update the maximum additional disk storage allowed for autoscaling purposes at any time. You can use `Aiven Console `_, Aiven API, or Aiven CLI to do that. + +Configure in Aiven Console +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +1. Log in to `Aiven Console `_ and navigate to a desired organization and project. +2. On the **Services** page of your project, select **Integration endpoints** from the sidebar. +3. On the **Integration endpoints** page, select **Disk autoscaler**, find your endpoint on the list of the existing autoscaler endpoints, and select the **Edit endpoint** icon. +4. In the **Edit endpoint** window, specify a new value for the maximum additional disk storage to be allowed for autoscaling, and select **Update**.
+ +Configure with Aiven API +~~~~~~~~~~~~~~~~~~~~~~~~ + +You can use `Aiven API `_ to configure the maximum additional disk storage allowed for autoscaling purposes on your service. + +Call the `ServiceIntegrationEndpointUpdate `_ endpoint passing the following parameters in your request: + +* ``project_name`` (path parameter) +* ``integration_endpoint_id`` (path parameter) +* ``max_additional_storage`` (request body > ``user_config`` object) + +.. code-block:: bash + + curl --request PUT \ + --url https://api.aiven.io/v1/project/{project_name}/integration_endpoint/{integration_endpoint_id} \ + --header 'Authorization: Bearer REPLACE_WITH_YOUR_BEARER_TOKEN' \ + --header 'content-type: application/json' \ + --data \ + '{ + "user_config": { + "autoscaler": { + "max_additional_storage": "REPLACE_WITH_DESIRED_VALUE_IN_GB" + } + } + }' + +Configure with Aiven CLI +~~~~~~~~~~~~~~~~~~~~~~~~ + +You can use the :doc:`Aiven CLI client ` to configure the maximum additional disk storage allowed for autoscaling purposes on your service. + +Run the :ref:`avn service integration-endpoint-update ` command passing the desired maximum additional disk storage as ``PARAMETER_VALUE_IN_GB``: + +.. code-block:: bash + + avn service integration-endpoint-update AUTOSCALER_ENDPOINT_ID \ + --user-config-json '{"max_additional_storage":"PARAMETER_VALUE_IN_GB"}' + +Disable disk autoscaler +----------------------- + +To disable disk autoscaling on your Aiven service, you need to disconnect the service from the autoscaler integration endpoint. You can also delete the integration endpoint itself if you don't need it for future purposes. You can disable disk autoscaling in `Aiven Console `_, using the Aiven API, or using the Aiven CLI client. + +Disable in Aiven Console +~~~~~~~~~~~~~~~~~~~~~~~~ + +Disable on a service +'''''''''''''''''''' + +1. Log in to `Aiven Console `_ and navigate to a desired organization, project, and service. +2. On the **Overview** page of your service, select **Integrations** from the sidebar. +3. On the **Integrations** page, find your autoscaler service integration at the top, select the **Actions** (**...**) menu > **Disconnect**. +4. In the **Disconnect service integration** window, select **Disconnect**. + +Delete an autoscaler endpoint +''''''''''''''''''''''''''''' + +1. Log in to `Aiven Console `_ and navigate to a desired organization and project. +2. On the **Services** page of your project, select **Integration endpoints** from the sidebar. +3. On the **Integration endpoints** page, select **Disk autoscaler**, find your endpoint on the list of the existing autoscaler endpoints, select the **Delete endpoint** icon, and select **Delete** in the **Confirmation** window. + +Disable with Aiven API +~~~~~~~~~~~~~~~~~~~~~~ + +To disable disk autoscaler on your service via `Aiven API `_, call the `ServiceIntegrationDelete `_ endpoint to delete an autoscaler integration on your service and, next, the `ServiceIntegrationEndpointDelete `_ endpoint on your project to delete the autoscaler integration endpoint if you don't need it for any future purposes. + +1. Call the `ServiceIntegrationDelete `_ endpoint on your service passing the following in the request body: + + * ``project_name`` (path parameter): the name of a project in which your autoscaler service integration is enabled + * ``integration_id`` (path parameter): ID of an autoscaler service integration you want to disable + + ..
code-block:: bash + + curl --request DELETE \ + --url https://api.aiven.io/v1/project/{project_name}/integration/{integration_id} \ + --header 'Authorization: Bearer REPLACE_WITH_YOUR_BEARER_TOKEN' + +2. Call the `ServiceIntegrationEndpointDelete `_ endpoint on your project passing the following in the request body: + + * ``project_name`` (path parameter): the name of a project in which your autoscaler integration endpoint is created + * ``integration_endpoint_id`` (path parameter): ID of an autoscaler integration endpoint you want to delete + + .. code-block:: bash + + curl --request DELETE \ + --url https://api.aiven.io/v1/project/{project_name}/integration_endpoint/{integration_endpoint_id} \ + --header 'Authorization: Bearer REPLACE_WITH_YOUR_BEARER_TOKEN' + +Disable with Aiven CLI +~~~~~~~~~~~~~~~~~~~~~~ + +You can disable disk autoscaler on your service with the :doc:`Aiven CLI client ` by running commands to delete the following: + +* Autoscaler integration on your service +* Autoscaler integration endpoint on your project (if you don't need the autoscaler integration endpoint on your project for any future purposes). + +1. Retrieve the ID of an integration you want to disable by running the following command: + + .. code-block:: bash + + avn service integration-list SERVICE_NAME + +2. Run the following command to delete an autoscaler integration on your service: + + .. code-block:: bash + + avn service integration-delete INTEGRATION_ID + +3. Retrieve the ID of an autoscaler integration endpoint you want to delete by running the following command: + + .. code-block:: bash + + avn service integration-endpoint-list --project PROJECT_NAME + +4. Run the following command to delete an autoscaler integration endpoint on your project: + + .. code-block:: bash + + avn service integration-endpoint-delete ENDPOINT_ID + +Related pages +--------------- + +:doc:`Dynamic disk sizing (DDS) ` diff --git a/docs/platform/howto/integrations/access-jmx-metrics-jolokia.rst b/docs/platform/howto/integrations/access-jmx-metrics-jolokia.rst index c981f07e0f..51baddaad8 100644 --- a/docs/platform/howto/integrations/access-jmx-metrics-jolokia.rst +++ b/docs/platform/howto/integrations/access-jmx-metrics-jolokia.rst @@ -1,5 +1,3 @@ -Jolokia - Access JMX metrics via Jolokia =============================== @@ -31,14 +29,13 @@ Enabling Jolokia integration ------------------------------ To enable Jolokia integration for a specific service, follow these steps: -1. In the `Aiven console `_, open the service for which you want to enable Jolokia integration. -2. In the service **Overview** page, scroll to **Service integrations** and select **Manage integrations**. -3. In the **Integrations** page, select **Jolokia** from the list. +1. In the `Aiven Console `_, open the service for which you want to enable Jolokia integration. +2. On the **Overview** page of your service, navigate to the **Service integrations** section, and select **Manage integrations**. +3. On the **Integrations** page, select **Jolokia** from the list. 4. Select the Jolokia endpoint you created and select **Enable**. The system will configure the Jolokia endpoint on all service nodes, providing access to the metrics. - The Aiven Jolokia integration enables HTTP POST requests to retrieve values from service-specific metrics. It also supports bulk requests for batch collection of metrics. For more detailed information on the Jolokia protocol, refer to `Jolokia -documentation `__ . +documentation `__ . Several metrics are specific to a Kafka® broker.
Therefore, you may need to query each node to obtain a comprehensive overview. The node IP is represented by a single DNS name. You can use the ``host`` command on Unix systems or the ``nslookup`` command on Windows systems to retrieve the list of IP addresses associated with a DNS name. diff --git a/docs/platform/howto/integrations/prometheus-metrics.rst b/docs/platform/howto/integrations/prometheus-metrics.rst index 23e940eec9..44480dca27 100644 --- a/docs/platform/howto/integrations/prometheus-metrics.rst +++ b/docs/platform/howto/integrations/prometheus-metrics.rst @@ -48,7 +48,8 @@ Aiven offers Prometheus endpoints for your services. To enable this feature, tak At the top of the **Integrations** page, you will see the Prometheus integration listed and status ``active``. -#. From the **Integrations** page, go to the **Overview** page > the **Connection information** section > the **Prometheus** tab. +#. Next, navigate to the service's **Overview** page, and then locate the **Connection information** section. +#. Click on the **Prometheus** tab. #. Copy **Service URI**, and use it in your browser to access the Prometheus dashboard. .. topic:: Result @@ -59,10 +60,29 @@ Aiven offers Prometheus endpoints for your services. To enable this feature, tak There might be a slight delay of approximately one minute before the metrics become available. + Accessing Prometheus in a VPC '''''''''''''''''''''''''''''' -If you use a VPC in your project, to access Prometheus, you need to go to `Aiven Console `_ > your project > the service you want to monitor using Prometheus > the **Overview** page > the **Advanced configuration** section and enable property ``public_access.prometheus``. +If you use a VPC in your project, follow these steps to access Prometheus: + +1. Access `Aiven Console `_. + +2. Select your project, and select the service you want to monitor using Prometheus. + +3. Click **Service settings** from the sidebar. + +4. In the **Cloud and network** section, click on the actions (**...**) menu. + +5. Choose **More network configurations**. + +6. In the **Network configuration** window, select **Add configuration options**. + +7. Search for the ``public_access.prometheus`` property and enable it. + +8. Click **Save configuration**. + + Configure Prometheus -------------------- @@ -133,7 +153,7 @@ Multi-node services .. note:: - For Aiven services with multiple nodes and a Replica URI, the primary DNS name does not include standby IP addresses. To track those, make sure to include the replica DNS names in the list. If you have ```` as ``public-example.aivencloud.com``, then you will need to add ``public-replica-example.aivencloud.com``. This applies to PostgreSQL®, MySQL®, and Redis®* services. + For Aiven services with multiple nodes and a Replica URI, the primary DNS name does not include standby IP addresses. To track those, make sure to include the replica DNS names in the list. If you have ```` as ``public-example.aivencloud.com``, then you will need to add ``public-replica-example.aivencloud.com``. This applies to PostgreSQL®, MySQL®, Apache Kafka®, and Redis®* services. 
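For example, to see which IP addresses each hostname resolves to before adding them to your scrape targets, you can run a quick check like the following sketch, which reuses the hypothetical hostnames from the note above:

.. code-block:: bash

   # Hypothetical hostnames; replace them with your own service hostnames.
   nslookup public-example.aivencloud.com           # primary nodes only
   nslookup public-replica-example.aivencloud.com   # standby (replica) nodes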
View full list of metrics '''''''''''''''''''''''''' @@ -148,10 +168,11 @@ You can preview the full list of metrics in :doc:`Prometheus system metrics ` * :doc:`Datadog integration ` +* Configure Prometheus for Aiven for Apache Kafka® via Privatelink diff --git a/docs/platform/howto/list-support.rst b/docs/platform/howto/list-support.rst deleted file mode 100644 index ac6114123a..0000000000 --- a/docs/platform/howto/list-support.rst +++ /dev/null @@ -1,6 +0,0 @@ -Support -======== - -Browse through instructions for Aiven's support services. - -.. tableofcontents:: diff --git a/docs/platform/howto/manage-domains.rst b/docs/platform/howto/manage-domains.rst new file mode 100644 index 0000000000..4f668e9a25 --- /dev/null +++ b/docs/platform/howto/manage-domains.rst @@ -0,0 +1,95 @@ +Manage domains +=============== + +Verified domains let you manage users in your organization. + +There are two ways you can verify a domain: + +* by adding a DNS TXT record to the domain (recommended) +* by uploading an HTML file to your website + +After adding a domain, organization users automatically become :doc:`managed users `. + + +Add a domain using a DNS TXT record +------------------------------------- + +1. In the organization you want to add a domain to, click **Admin**. + +2. Click **Domains**. + +3. Click **Add domain**. + +4. Enter a **Domain name**. + +5. In the **Verification method**, select **Add a DNS TXT record to your domain host**. + +6. Click **Add domain**. + +7. In the **Verification method** column, click **DNS TXT record**. + +8. Copy the TXT record value. + +9. In another browser tab or window, log in to your domain hosting provider. + +10. Go to the DNS settings. + +11. In the DNS settings for your domain provider, create a TXT record with the following: + + .. list-table:: + :header-rows: 1 + :align: left + + * - Field name + - Value + * - Name + - ``_aiven-challenge.{your domain}`` + * - Record value + - The TXT record value you copied in the format ``token=,expiry=never`` + * - Type + - ``TXT`` + +12. In the Aiven Console, open the **Actions** menu and click **Verify**. + +It can take up to 72 hours for your DNS records to update the domain to be verified. If the domain is still not verified after that time, you can retry it by repeating the last step. + + +Add a domain using an HTML file +-------------------------------- + +#. In the organization that you want to add a domain to, click **Admin**. + +#. Click **Domains**. + +#. Click **Add domain**. + +#. Enter a **Domain name**. + +#. In the **Verification method**, select **Upload an HTML file to your website**. + +#. Click **Add domain**. + +#. In the **Verification method** column, click **HTML file upload**. + +#. Download the HTML file. + +#. Upload the HTML file to your website in the path ``/.well-known/aiven``. + +#. In the Aiven Console, open the **Actions** menu and click **Verify**. + + +Remove a domain +----------------- + +.. important:: + Removing a domain is an irreversible action. + +#. In the organization that you want to remove a domain from, click **Admin**. + +#. Click **Domains**. + +#. Find the domain that you want to remove and open the **Actions** menu. + +#. Click **Remove**. + +#. Confirm you want to remove the domain by clicking **Remove domain**. 
\ No newline at end of file diff --git a/docs/platform/howto/manage-organizations.rst b/docs/platform/howto/manage-organizations.rst new file mode 100644 index 0000000000..4ae2042704 --- /dev/null +++ b/docs/platform/howto/manage-organizations.rst @@ -0,0 +1,33 @@ +Manage organizations +===================== + +Delete an organization +----------------------- + +#. Delete all :doc:`projects ` in the organization and in the organizational units. + +#. Click **Admin**. + +#. Click **Organization**. + +#. Open each organizational unit by clicking its name then click **Delete** to delete it. + +#. After all the organizational units have been deleted, on the **Organization** page click **Delete**. + +#. Confirm that you want to delete the organization by clicking **Delete**. + + +Rename an organization +----------------------- + +#. In the organization, click **Admin**. + +#. Click **Organization**. + +#. Click **Rename**. + +#. Select **Rename**. + +#. Enter the new name. + +#. Click **Rename**. diff --git a/docs/platform/howto/manage-unassigned-projects.rst b/docs/platform/howto/manage-unassigned-projects.rst index 676ab0cfc6..6fddd65c21 100644 --- a/docs/platform/howto/manage-unassigned-projects.rst +++ b/docs/platform/howto/manage-unassigned-projects.rst @@ -1,35 +1,35 @@ Manage unassigned projects =========================== -An unassigned project is a project that isn't assigned to an organization or organizational unit. Projects that are part of an organization or unit are easier to maintain as common settings like authentication are centrally managed at the organization level. +An unassigned project is a project that isn't assigned to an organization or organizational unit. Projects that are part of an organization or unit are easier to maintain as common settings like authentication are centrally managed at the organization level. These projects also get the latest feature updates. .. important:: - Aiven is planning to discontinue support for unassigned projects. Organize your projects in the way that works best for you by assigning them now to an organization or organizational unit. If you have unassigned projects after that date, they will be assigned to your organization. If you don't have an organization, one will be created and the unassigned projects will be moved there. + It's no longer possible to create unassigned projects and projects that are not assigned to an organization or unit will not be updated with new features. We recommend assigning all projects to an organization or unit using the instructions on this page. Learn more about :doc:`organizations, organizational units, and projects `. Manage unassigned projects in Aiven Console ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Assign standalone projects to an organization or unit ------------------------------------------------------- +Assign projects to an organization or unit +-------------------------------------------- -If you don't have an organization, you need to :ref:`create an organization ` first and then assign your projects to it. +If you don't have an organization, you need to :ref:`create one ` first. -To assign standalone projects to an organization or unit in the `Aiven web console `_: +To assign standalone projects to an organization or unit in the `Aiven Console `_: 1. Click **Projects**. 2. Click **View unassigned projects** to see a list of all projects not assigned to an organization or organizational unit. 
If you don't see **View unassigned projects** in the dropdown menu, then you don't have any unassigned projects. -3. On the **Unassigned projects** page, click **Assign project**. +3. On the **Unassigned projects** page, click **Assign project** for a project you want to assign. 4. Add any other projects that you want to assign to the same organization or unit. 5. Select the organization or organizational unit. -6. Click **Associate Projects**. +6. Click **Assign projects**. .. _create-org: Create an organization ----------------------- -Projects must be assigned to organizations or units within organizations. We recommend using **only one organization** and creating organizational units to group your projects. +Projects must be assigned to organizations or units. We recommend using only one organization and creating organizational units to group your projects. If you don't have any organization yet, you can create one: diff --git a/docs/platform/howto/manage-vpc-peering.rst b/docs/platform/howto/manage-vpc-peering.rst index 14383484ae..e1490c3fe1 100644 --- a/docs/platform/howto/manage-vpc-peering.rst +++ b/docs/platform/howto/manage-vpc-peering.rst @@ -6,7 +6,7 @@ Virtual Private Cloud (VPC) peering is a method of connecting separate AWS, Goog .. _platform_howto_setup_vpc_peering: Configure VPC peering ----------------------------------------- +--------------------- In Aiven, VPC peering is configured as a project and region-specific setting. This means that all services created and running use the same VPC peering connection. If necessary, you can use different connections for VPC peering across multiple projects. @@ -30,7 +30,7 @@ To set up VPC peering for your Aiven project: The state of the VPC is shown in the table. Cloud-specific VPC peering instructions ----------------------------------------- +--------------------------------------- - :doc:`Set up VPC peering on Amazon Web Services (AWS) ` - :doc:`Set up VPC peering on Google Cloud Platform (GCP) ` @@ -41,7 +41,7 @@ Cloud-specific VPC peering instructions Depending on the cloud provider that you selected for the VPC connection, you also have to accept a VPC peering connection request or set up a corresponding VPC peering connection to Aiven. Deploy new services to a VPC -------------------------------- +---------------------------- When you create a new service, your peered VPC is available as a new geolocation on the **VPC** tab under **Select service region**. It can take a few minutes for a newly created VPC to appear for service deployments. @@ -50,45 +50,54 @@ When you create a new service, your peered VPC is available as a new geolocation The service nodes use firewall rules to allow only connections from private IP ranges that originate from networks on the other end of VPC peering connections. You can only deploy services to a VPC if they belong to the project where that specific VPC was created. Delete an existing VPC and VPC peering ----------------------------------------- +-------------------------------------- Before deleting an existing VPC from `Aiven Console `_, you should move out any active services from that VPC. To delete a VPC, navigate to `Aiven Console `_ > **VPCs**. Find your VPC and select **Delete** from the meatballs menu for this VPC. Once the VPC is deleted, the cloud provider side of the peering connection will go to an inactive or deleted state. 
Migrate a public service to a VPC ------------------------------------ +--------------------------------- You can migrate any Aiven service to a different VPC: #. In `Aiven Console `_, go to your service. -#. On the **Overview** page of your service, go in to **Cloud and VPC** section, click **Migrate cloud**. +#. On the **Overview** page of your service, select **Service settings** from the sidebar. -#. In the **Region** section, select the **VPC** tab. +#. On the **Service settings** page, navigate to the **Cloud and network** section and select **Change cloud or region** from the actions (**...**) menu. -#. Select the VPC that you want to use. - -#. Click **Migrate**. +#. In the **Migrate service to another cloud** window > the **Region** section, select the **VPCs** tab, select the VPC that you want to use, and select **Migrate**. Access VPC services from the public internet ------------------------------------------------ +-------------------------------------------- When you move your service to a VPC, access from public networks is blocked by default. If you switch to public access, a separate endpoint is created with a public prefix. You can enable public Internet access for your services by following the :doc:`Enable public access in a VPC ` instructions. -IP filtering (the **Allowed IP addresses** list on the service's **Overview** page) is still available for a service deployed to a VPC where both public and private access are allowed. We recommend that you use IP filtering when your VPC service is also exposed to the public internet. +IP filtering (the **Service settings** page > the **Cloud and network** section > the actions (**...**) menu > **Set public IP filters**) is still available for a service deployed to a VPC where both public and private access are allowed. We recommend that you use IP filtering when your VPC service is also exposed to the public internet. + +.. note:: + + **Public IP filters** are restricted via VPC. IP filters apply to publicly-accessible endpoints only. -Also note that safelisting applies to both internal and external traffic. If you safelist an external IP address and want to keep traffic flowing with the internal (peered) connections, make sure that you safelist the CIDR blocks of the peered networks as well to avoid disruptions to the service. +Safelisting applies to both internal and external traffic. If you safelist an external IP address and want to keep traffic flowing with the internal (peered) connections, make sure that you safelist the CIDR blocks of the peered networks as well to avoid disruptions to the service. Troubleshoot VPC connection issues -------------------------------------- +---------------------------------- + +Any network changes to VPC-peered hosts external to Aiven can cause issues with routing to your Aiven services hosted in a VPC. +In such cases, try refreshing your VPC connections. + +.. note:: + + Changes to your VPCs (such as adding a new subnet) can take up to 24 hours to take effect, so wait at least 24 hours before refreshing your VPC connections. -Any network changes to VPC peered hosts external from Aiven can cause issues with routing to your Aiven services hosted in a VPC. To troubleshoot such issues, take the following steps: +To refresh your VPC connections: 1. In `Aiven Console `_, select **VPCs**. 2. Find the ID of the affected VPC and select it from the **Internal ID** column. 3. Select **Refresh VPC connections**.
-As a result, the platform checks the VPC peering connection and rebuilds the peering connection state if there are any changes detected. +The platform checks the VPC peering connection and rebuilds the peering connection state if there are any changes detected. For any other issues, open a support ticket from `Aiven Console `_ to get in touch with the support team and/or see :doc:`Get support in the Aiven Console `. diff --git a/docs/platform/howto/migrate-services-cloud-region.rst b/docs/platform/howto/migrate-services-cloud-region.rst index 6dac2846ba..a647ace3d4 100644 --- a/docs/platform/howto/migrate-services-cloud-region.rst +++ b/docs/platform/howto/migrate-services-cloud-region.rst @@ -8,11 +8,12 @@ The short interruption mentioned above does not include the potential delays cau When spinning a new Aiven service, you are not tied to a cloud provider or region. Your services can be migrated to better match your needs. Services can be moved to another cloud provider, or another region within the same provider, or both. -1. Log in to `Aiven Console `_. -2. Go to your **Services**, and select the service you want to migrate. -3. On the **Overview** page of your service, scroll down to **Cloud and VPC** > **Migrate cloud**. -4. In the **Migrate service to another cloud** window, select new cloud provider and region. -5. Select **Migrate**. +1. Log into the `Aiven Console `_, select your project and then select the service you want migrate to another cloud or region. +2. On the service page, click **Service settings** from the sidebar. +3. In the **Cloud and network** section, click **Actions (...)**. +4. From the dropdown menu, click **Change cloud or region**. +5. In the **Migrate service to another cloud** window, select new cloud provider and region. +6. Select **Migrate**. .. topic:: Result diff --git a/docs/platform/howto/migrate-services-vpc.rst b/docs/platform/howto/migrate-services-vpc.rst index ccb5a5e2bf..f8dd53b4c9 100644 --- a/docs/platform/howto/migrate-services-vpc.rst +++ b/docs/platform/howto/migrate-services-vpc.rst @@ -1,36 +1,29 @@ Migrate a public service to a Virtual Private Cloud (VPC) ========================================================== -If you are running your Aiven service over the public Internet, you may want to restrict access or allow connectivity between a your own Virtual Private Cloud (VPC, also known as VNet) and Aiven. +When operating your Aiven service over the public Internet, you might consider enhancing security or connectivity by transitioning to a Virtual Private Cloud (VPC, also known as VNet). This move allows you to restrict access and establish a more controlled environment between your VPC and Aiven. -Aiven allows one click migrations between regions and cloud providers; the same is true between the public Internet and a VPC. However, the movement of your service can mean that connectivity is interrupted as the service URI will change its resolution from public IP addresses to IP addresses within a VPC. +Aiven simplifies the migration process, allowing for seamless one-click transitions between different regions and cloud providers, including the migration from the public Internet to a VPC. However, migrating your service can disrupt connectivity due to the service URI's change in resolution from public IP addresses to IP addresses within a VPC. -To ensure consistent access during migration, you can use the ``public access`` advanced configuration to allow your service to be available over the public Internet as well as over a VPC. 
+To maintain uninterrupted access throughout this transition, Aiven provides a :doc:`public access ` advanced configuration. This ensures that your service remains accessible over both the public Internet and within the VPC during migration. -To ensure that you are always able to connect to the service -during the migration phase, you can use a few simple -tests during the migration phase. +To ensure uninterrupted access to your service during the migration phase, conduct a few simple tests to verify connectivity. -Overview ---------- -To safely migrate Aiven services into a VPC, take the following steps: +Migration process +-------------------- -#. Create a VPC and set up peering. +Follow these steps to migrate Aiven services to a VPC: -#. Validate network peering connection with the test service. +#. **VPC creation and peering**: :doc:`Create a VPC ` and establish peering. +#. **Network peering validation**: Test the network peering with a non-critical service. +#. **Enable public access**: Activate public access for all services to be migrated. +#. **Application configuration update**: Modify your application to use public access hostnames. +#. **Service migration**: Start the migration of the service into the VPC. +#. **Peering connections validation**: Confirm the functionality of peering connections with private hostnames and ports. +#. **Switch to private access hostnames**: Change application configuration to use private access hostnames. +#. **Disable public access**: Turn off public access for all services. -#. Enable public access on all the services to be migrated. - -#. Change your application configuration to use public access hostnames. - -#. Migrate the service into the VPC. - -#. Validate peering connections to all private hostnames and ports. - -#. Change application configuration to use private access hostnames. - -#. Disable public access on all the services. .. note:: @@ -39,51 +32,48 @@ To safely migrate Aiven services into a VPC, take the following steps: Initial setup -------------- -Ensure that you have the VPC created and peering is active. This is can be automated via Terraform if needed. Check out -:ref:`how to set up VPC peering ` on the Aiven platform. +Before you begin, make sure you have created your VPC and that peering is active. Automating this process with Terraform is an option. For guidance, refer to :ref:`setting up VPC peering ` on the Aiven platform. + +To illustrate the process, we will use the ``google-us-east1`` VPC as a reference. Ensure both your VPC and its peering connection are in an ``Active`` state. -In this guide, the ``google-us-east1`` VPC is used for testing. Ensure that the VPC and peering are both in an ``Active`` state. Testing the connection ----------------------- -Check that you can connect to a non-critical service to ensure that the networks are peered. For that purpose, you can create a small service inside an existing VPC to test the network connection. +To confirm network peering, test the connection with a non-critical service within your VPC. This involves creating a small service in an existing VPC to check the network connectivity. -Ensure that your service is deployed into a VPC. - -From a host within your own VPC, make sure that you can resolve the DNS -hostname, and connect to the service port. The following commands work -on Ubuntu 20.04 LTS and should work for most Linux distributions: +Deploy your service into a VPC. 
Then, from a host within your VPC, ensure you can resolve the DNS hostname and connect to the service port. The following commands are typically used on Ubuntu 20.04 LTS and should be applicable for most Linux distributions: - ``nslookup {host}`` - - ``nc -vz {host} {port}`` + Enable public access --------------------- -Enable the ``public_access.{service type}`` configuration on all of your services in the -**Advanced Configuration** section (in `Aiven Console `_ > your service's **Overview** page). For example, the configuration name is ``public_access.kafka`` for Aiven for Apache Kafka®. This creates a new hostname and -port. It is still publicly available once the service is moved into the VPC. +Enable the ``public_access.{service type}`` configuration for all services you are migrating. This setting is found in the **Advanced Configuration** section of the `Aiven Console `_ on your service's **Overview** page. For example, for Aiven for Apache Kafka®, the configuration would be ``public_access.kafka``. This action creates a new hostname and port, which remain publicly accessible even after the service is moved into the VPC. -You will now see a new hostname and port under "Connection Information" -by selecting the ``Public`` "Access Route" +You can find the new hostname and port under **Connection Information** by selecting the ``Public`` "Access Route". Make sure you can connect to each new host/port combination. -Ensure that you can connect to each host/port combination. Configure and redeploy application ----------------------------------- -It is highly recommended to configure your applications to use the -public access route during migration. This ensures access to the -services while moving into a private network. In dual access mode, you -can test all connections and ports before finally cutting over to use -the private hostname(s) and underlying private IP addresses. +It's recommended to configure your applications to use the public access route during the migration. This ensures continued access to the services as they transition to a private network. In dual access mode, test all connections and ports before switching over to the private hostname(s) and IP addresses. Migrate Aiven service to your VPC ---------------------------------- +You can migrate your Aiven services into a VPC using the `Aiven Console `_ . + +#. Log in to the `Aiven Console `_ select your project and then select the service. +#. On the service page, select **Service settings** from the sidebar. +#. In the **Cloud and network** section, click **Actions (...)**. +#. Choose **Change cloud or region** from the dropdown menu. +#. In the **Migrate service to another cloud** window, select the **VPC** tab and choose the appropriate region for your project's dedicated VPC. Make sure the `Public Internet` tag is noted. +#. Start the migration process. Afterwards, monitor for the ``Project VPC`` tag to appear on the service's page, which signifies the successful migration. + In `Aiven Console `_, use the **Cloud and VPC** > **Migrate cloud** section on the service's **Overview** page to migrate your Aiven services into a VPC. Note the ``Public Internet`` tag. Ensure that you select the region from the ``VPC`` tab. This is a @@ -92,30 +82,26 @@ dedicated VPC for your project. Ensure that you see the ``Project VPC`` tag after migration. You can monitor the migration status on the service's page in `Aiven Console `_. 
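The same migration can also be scripted with the Aiven CLI instead of the console, which is useful when moving many services. The flags below are assumptions to verify against ``avn service update --help`` for your CLI version; in particular, the project VPC is selected by its internal ID rather than by name.

.. code:: bash

   # Look up the ID of the project VPC in the target region (assumed syntax)
   avn vpc list --project my-project

   # Move the service into that VPC; the cloud must match the VPC's region (assumed flags)
   avn service update my-service --project my-project --cloud google-us-east1 --project-vpc-id <project_vpc_id>

   # Watch the service state until the rebuild finishes
   avn service get my-service --project my-project
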
+ + Testing the service connections -------------------------------- -After the migration, you will see some private IP addresses if you use -the ``nslookup`` command. Ensure that you can connect to the private -hostnames and ports, for example, firewall rules and routing works. +Post-migration, use the ``nslookup`` command to see private IP addresses. Ensure you can connect to the private hostnames and ports, verifying that firewall rules and routing are functioning correctly. Configure and redeploy your applications ----------------------------------------- -Now you can convert your application to use the private hostname again. +After the migration, reconfigure your applications to use the private hostname. + Cleanup by disabling public access ----------------------------------- -Disable the ``public_access.{service type}`` configuration on all of your services in -the **Advanced configuration** section (in `Aiven Console `_ > your service's **Overview** page). This removes the ``public-`` prefixed hostname and port. +To finalize the migration, disable the ``public_access.{service type}`` configuration for all your services. This option is located in the **Advanced configuration** section of the Aiven Console on your service's **Overview** page. Disabling it removes the ``public-`` prefixed hostname and port. Conclusion ---------- -These steps allow you to perform public => VPC service migrations with -zero downtime in a safe manner by testing connections every step of the -way. As always, ensure that your client applications have failure and -retry logic as the underlying servers and IP addresses change. This is usually -not an issue in clustered services, for example, Apache Kafka® and OpenSearch®, but -might require additional configuration for services like PostgreSQL® and Redis®*. +By following these steps, you can migrate services from public to VPC with minimal downtime, ensuring safe and tested connections throughout the process. Always equip your client applications with failure and retry logic to adapt to changes in servers and IP addresses. While this is typically straightforward for clustered services like Apache Kafka® and OpenSearch®, additional configurations might be necessary for services like PostgreSQL® and Redis®. + diff --git a/docs/platform/howto/private-ip-resolution.rst b/docs/platform/howto/private-ip-resolution.rst index ad3de40617..844836ab97 100644 --- a/docs/platform/howto/private-ip-resolution.rst +++ b/docs/platform/howto/private-ip-resolution.rst @@ -22,15 +22,15 @@ DNS-rebinding protection on your network. To verify this assumption: ``8.8.8.8``. This has no rebinding protection so serves as a good test. You can use the ``dig`` command: -.. code:: + .. code:: - dig +short myservice-myproject.aivencloud.com @8.8.8.8 + dig +short myservice-myproject.aivencloud.com @8.8.8.8 3. Compare the output of the above command with the response from your default DNS resolver: -.. code:: + .. code:: - dig +short myservice-myproject.aivencloud.com + dig +short myservice-myproject.aivencloud.com 4. 
If the response from your default DNS resolver does not return the same IP address as the earlier test, then your default DNS resolver is blocking the diff --git a/docs/platform/howto/project-support-center.rst b/docs/platform/howto/project-support-center.rst index ab6136e14d..4901bcbba0 100644 --- a/docs/platform/howto/project-support-center.rst +++ b/docs/platform/howto/project-support-center.rst @@ -1,24 +1,67 @@ -Get support in the Aiven Console -================================= +Support +======== -Aiven is committed to providing excellent and responsive support to all of our customers. We monitor our services 24/7 and rapidly address any anomalies. +The Basic support tier is provided to all customers on paid service plans. Aiven offers three additional support tiers with faster response times, phone support, and other services. For more information about the tiers, check out the `Aiven support details page `_ or contact sales@Aiven.io. -The Basic support tier is provided to all customers on paid plans. If you are using a free service, you can ask questions in the Aiven Community Forum. +For other services included in your support tier - such as business reviews or disaster recovery planning - contact the sales team at sales@Aiven.io. If you are using a free service, you can ask questions in the `Aiven Community Forum `_. -In addition to the Basic support tier, we offer three additional tiers that offer faster response times, phone support, and other services. For more information about support plans, please see `Aiven support details page `_ or contact sales@Aiven.io to discuss the paid support options. +The Aiven service level agreement (SLA) is available on `the SLA page `_. Custom SLAs are available for premium plans. Contact the sales team at sales@Aiven.io for more details. + + +.. _upgrade-support-tier: + +Upgrade your support tier +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you have questions or want to downgrade your support tier, contact your account team. + +To upgrade your organization's support tier in the `Aiven Console `_: + +#. In the organization, click **Support**. + +#. In the **Current support tier** section, click **Upgrade to Enterprise**. + +#. Choose the support tier you want to upgrade to and click **Choose tier**. + +#. Select a **Start date**. + + .. note:: + If you select the current month, you will be charged a percentage of the total service spend for the whole month, starting from the 1st. + +#. Select a **Billing group**. + + .. important:: + The support costs for all current and future services in the selected organization and all of its organizational units will be added to the invoice for this billing group. + +#. Click **Upgrade tier**. + +It typically takes 1-2 business days to set up the new support tier. You can view the status of your request on the support page under **Current support tier**. -You can create and track support tickets in the `Aiven Console `_ following the instructions on this page. For other services included in your support tier - such as business reviews or disaster recovery planning - contact the sales team at sales@Aiven.io. Create a support ticket ~~~~~~~~~~~~~~~~~~~~~~~~ -To create support tickets in the `Aiven Console `_: +#. In the `Aiven Console `_, click **Support**. -#. Click **Support**. +#. Click **Go to Aiven Support Center**. #. Click **Create ticket**. -#. Enter a **Subject** and detailed **Description** of the issue. +#. Enter email addresses to CC in the support ticket. 
All new comments and updates will be sent to these emails. + +#. Enter a **Subject**. + +#. Select a **Severity** level: + + * Low: The primary functions are working, but some secondary functionality is not working. + * High: The primary functions are working, but severely limited or slow. + * Critical: The primary functions are not working and it's not possible to find workarounds. + +#. Optional: Enter the ID of the affected projects and services. + +#. Select the affected **Product** and the reason for creating the ticket. + +#. Enter a detailed **Description** of the issue. .. note:: @@ -29,10 +72,63 @@ To create support tickets in the `Aiven Console `_: * Any error messages * Any languages or frameworks you are using -#. Select the affected project, service, and a severity level. +#. Optional: Upload files such as screenshots, logs, or :ref:`HAR files `. + + .. important:: + Aiven support will never ask you to provide sensitive data such as passwords or personal information. Remove or replace sensitive data in files that you attach to a support ticket. #. Click **Create ticket**. -The new support ticket is sent to our support team for review and you also receive a confirmation email. `Response times `_ vary by case severity and support tier. +You can track the status of your tickets on the **My tickets** page. `Response times `_ vary by case severity and support tier. If you are not satisfied with the processing of your ticket, add ``#escalate`` in the comments. + + +Add participants to a support ticket +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you want to give every organization user access to all support tickets in your organization contact your account team. + +To add Aiven users to a support ticket: + +#. On the **My tickets** page, open the ticket. + +#. Click **Add to conversation**. + +#. Add the email addresses in the **CC** field separated by a space. This must be the same email address they use to log in. + +#. Enter a comment and click **Submit**. + + +Get notifications for all support tickets +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Super admin can get notifications for updates on all tickets in their organization. + +#. Click **My tickets**. + +#. On the **Tickets in my organization** tab, click **Follow all tickets**. + +You will get email notifications for all updates on both existing and new tickets. You can unfollow them at any time. + + +.. _create-har-files: + +Create HAR files +~~~~~~~~~~~~~~~~~ + +The support team may need information about the network requests that are generated in your browser when you experience a problem. Browsers can capture a log of these network requests in a HAR (HTTP Archive) file. + +If the support team asks for a HAR file: + +#. Use your browser to create the HAR file while you go through the steps to reproduce the problem: + + * Follow the `instructions for Internet Explorer/Edge, Firefox, and Chrome `_. + * For Safari, make sure you can access the `developer tools `_ and then follow the instructions for `exporting a HAR file `_. + +#. Replace sensitive data in the file with placeholders while retaining the JSON structure and format. Examples of sensitive data include: + + * Personal identifiers such as email addresses and phone numbers + * Authentication tokens or passwords + * Sensitive URLs + * Sensitive cookies or headers -You can track the status of your tickets from the project support center, but information and updates are sent directly to your email address. 
You can update your ticket's details and communicate with the support team by replying to these emails. If you are on a paid support tier, your account team is also notified. \ No newline at end of file +#. Send the sanitized file to the support team in your reply to their email or in the ticket's comments. diff --git a/docs/platform/howto/public-access-in-vpc.rst b/docs/platform/howto/public-access-in-vpc.rst index 4450a4b4fe..983252737b 100644 --- a/docs/platform/howto/public-access-in-vpc.rst +++ b/docs/platform/howto/public-access-in-vpc.rst @@ -4,9 +4,12 @@ Enable public access in VPCs To enable public access for a service which is running within a virtual private cloud (VPC), follow these steps: #. Log in to `Aiven Console `_ and select your service from the **Services** page. -#. On the **Overview** page of your service, scroll down to the **Advanced configuration** section and select **Change** > **Add configuration option**. -#. Select an option that starts with ``public_access`` followed by the type of service you use and switch it on. -#. Click **Save advanced configuration**. The **Overview** page now has an **Access Route** setting inside the **Connection information** section with **Public** and **Dynamic** options. +#. On the **Overview** page of your service, select **Service settings** from the sidebar. +#. On the **Service settings** page, navigate to the **Cloud and network** section and select **More network configurations** from the actions (**...**) menu. +#. In the **Network configuration** window, select **Add configuration options**. In the search field, enter ``public_access``. From the displayed parameter names, select a parameter name for your service type. Select the toggle switch to enable the selected parameter. Select **Save configuration**. + + The **Overview** page now has an **Access Route** setting inside the **Connection information** section with **Public** and **Dynamic** options. + #. Select **Public** to see the public URL for your service. The connection with the **Dynamic** option is not possible outside the VPC, while the connection with the **Public** option is accessible over the public Internet. **IP Allow-List** applies to all connection types (Dynamic and Public, in this example). diff --git a/docs/platform/howto/reactivate-suspended-project.rst b/docs/platform/howto/reactivate-suspended-project.rst index 705518a16f..22dd13e2e9 100644 --- a/docs/platform/howto/reactivate-suspended-project.rst +++ b/docs/platform/howto/reactivate-suspended-project.rst @@ -3,9 +3,11 @@ Reactivate suspended projects If you have bills past due and didn't set up a payment method, you may receive this error when trying to log in to the console: - ERROR +.. code:: - Project suspended, access prohibited + ERROR + + Project suspended, access prohibited Reactivate a suspended project ------------------------------ diff --git a/docs/platform/howto/restrict-access.rst b/docs/platform/howto/restrict-access.rst index 8be0d51f6b..9b201cfa41 100644 --- a/docs/platform/howto/restrict-access.rst +++ b/docs/platform/howto/restrict-access.rst @@ -9,14 +9,15 @@ By default, Aiven services are publicly accessible, but you can restrict access 1. Log in to `Aiven Console `_. 2. On the **Services** page, select the service you want to restrict. -3. On the **Overview** page of your service, scroll down to **Allowed IP addresses**, and select **Change**. -4. 
In the **Allowed inbound IP addresses** window, enter your address or address block using the CIDR notation, and select the **+** icon to add it to the list of the trusted IP addresses. +3. On the **Overview** page of your service, select **Service settings** from the sidebar. +4. On the **Service settings** page, navigate to the **Cloud and network** section and select **Set public IP filters** from the actions (**...**) menu. +5. In the **Allowed inbound IP addresses** window, enter your address or address block using the CIDR notation, and select the **+** icon to add it to the list of the trusted IP addresses. .. note:: You can add multiple addresses or address blocks or combination of both at once. -5. Select **Close**. +6. Select **Close**. .. topic:: Result diff --git a/docs/platform/howto/saml/saml-authentication.rst b/docs/platform/howto/saml/saml-authentication.rst index 2a8a688502..f1e7bd79ae 100644 --- a/docs/platform/howto/saml/saml-authentication.rst +++ b/docs/platform/howto/saml/saml-authentication.rst @@ -28,9 +28,9 @@ SAML Authentication methods are configured at the organization level: #. In the organization, click **Admin**. -#. Select **Authentication**. +#. Select **Identity providers**. -#. Click on **Add authentication method**. +#. Click **Add identity provider**. #. Enter a name and select SAML. You can also select the groups that users will be added to when they sign up or log in through this authentication method. diff --git a/docs/platform/howto/saml/setup-saml-auth0.rst b/docs/platform/howto/saml/setup-saml-auth0.rst index aab3637ca6..61f55b254c 100644 --- a/docs/platform/howto/saml/setup-saml-auth0.rst +++ b/docs/platform/howto/saml/setup-saml-auth0.rst @@ -8,9 +8,9 @@ Prerequisite steps in Aiven Console #. In the organization, click **Admin**. -#. Select **Authentication**. +#. Select **Identity providers**. -#. Click **Add authentication method**. +#. Click **Add identity provider**. #. Enter a name and select SAML. You can also select the groups that users will be added to when they sign up or log in through this authentication method. @@ -42,15 +42,15 @@ Configure SAML on Auth0 10. In the **Settings** section for the Application Callback URL, remove the existing configuration and add the following field mapping configuration: -.. code-block:: shell - - { - "email": "email", - "first_name": "first_name", - "identity": "email", - "last_name": "last_name", - "mapUnknownClaimsAsIs": true - } + .. code-block:: shell + + { + "email": "email", + "first_name": "first_name", + "identity": "email", + "last_name": "last_name", + "mapUnknownClaimsAsIs": true + } 11. Click **Enable** and **Save**. @@ -81,10 +81,11 @@ Go back to the **Authentication** page in `Aiven Console `_ to check the process step by step. +If you have issues, you can use the `SAML Tracer browser extension `_ to check the process step by step. diff --git a/docs/platform/howto/saml/setup-saml-azure.rst b/docs/platform/howto/saml/setup-saml-azure.rst index 947ad77a11..9c9353e78c 100644 --- a/docs/platform/howto/saml/setup-saml-azure.rst +++ b/docs/platform/howto/saml/setup-saml-azure.rst @@ -9,9 +9,9 @@ Prerequisite steps in Aiven Console #. In the organization, click **Admin**. -#. Select **Authentication**. +#. Select **Identity providers**. -#. Click **Add authentication method**. +#. Click **Add identity provider**. #. Enter a name and select SAML. You can also select the groups that users will be added to when they sign up or log in through this authentication method. 
diff --git a/docs/platform/howto/saml/setup-saml-fusionauth.rst b/docs/platform/howto/saml/setup-saml-fusionauth.rst index a449349678..68c69bae0f 100644 --- a/docs/platform/howto/saml/setup-saml-fusionauth.rst +++ b/docs/platform/howto/saml/setup-saml-fusionauth.rst @@ -8,9 +8,9 @@ Prerequisite steps in Aiven Console #. In the organization, click **Admin**. -#. Select **Authentication**. +#. Select **Identity providers**. -#. Click **Add authentication method**. +#. Click **Add identity provider**. #. Enter a name and select SAML. You can also select the groups that users will be added to when they sign up or log in through this authentication method. @@ -79,8 +79,7 @@ Next, create an application in your FusionAuth instance: #. On the **SAML** tab, and toggle on the **Enabled** switch. -#. Paste the **Metadata URL** and **ACS URL** you copied from the Aiven Console to the **Issuer** and -**Authorized redirect URLs** fields in your FusionAuth application, respectively. +#. Paste the **Metadata URL** and **ACS URL** you copied from the Aiven Console to the **Issuer** and **Authorized redirect URLs** fields in your FusionAuth application, respectively. .. list-table:: :header-rows: 1 diff --git a/docs/platform/howto/saml/setup-saml-google.rst b/docs/platform/howto/saml/setup-saml-google.rst index a4aae039fa..3e2cdb7b7f 100644 --- a/docs/platform/howto/saml/setup-saml-google.rst +++ b/docs/platform/howto/saml/setup-saml-google.rst @@ -8,9 +8,9 @@ Prerequisite steps in Aiven Console #. In the organization, click **Admin**. -#. Select **Authentication**. +#. Select **Identity providers**. -#. Click **Add authentication method**. +#. Click **Add identity provider**. #. Enter a name and select SAML. You can also select the groups that users will be added to when they sign up or log in through this authentication method. diff --git a/docs/platform/howto/saml/setup-saml-jumpcloud.rst b/docs/platform/howto/saml/setup-saml-jumpcloud.rst index 2f51194a33..d0958f1cd4 100644 --- a/docs/platform/howto/saml/setup-saml-jumpcloud.rst +++ b/docs/platform/howto/saml/setup-saml-jumpcloud.rst @@ -8,9 +8,9 @@ Prerequisite steps in Aiven Console #. In the organization, click **Admin**. -#. Select **Authentication**. +#. Select **Identity providers**. -#. Click **Add authentication method**. +#. Click **Add identity provider**. #. Enter a name and select SAML. You can also select the groups that users will be added to when they sign up or log in through this authentication method. diff --git a/docs/platform/howto/saml/setup-saml-okta.rst b/docs/platform/howto/saml/setup-saml-okta.rst index 3778b6cbdb..c054038e40 100644 --- a/docs/platform/howto/saml/setup-saml-okta.rst +++ b/docs/platform/howto/saml/setup-saml-okta.rst @@ -8,9 +8,9 @@ Prerequisite steps in Aiven Console #. In the organization, click **Admin**. -#. Select **Authentication**. +#. Select **Identity providers**. -#. Click **Add authentication method**. +#. Click **Add identity provider**. #. Enter a name and select SAML. You can also select the groups that users will be added to when they sign up or log in through this authentication method. @@ -34,7 +34,7 @@ This is a two step process. First, you create the SAML SP-Initiated authenticati #. Enter a name for the app and add a logo. -#. Set it's visibility for your Okta users and click **Next**. +#. Set its visibility for your Okta users and click **Next**. #. 
Set the following values in the app configuration: diff --git a/docs/platform/howto/saml/setup-saml-onelogin.rst b/docs/platform/howto/saml/setup-saml-onelogin.rst index bf668e7af0..ea776980bb 100644 --- a/docs/platform/howto/saml/setup-saml-onelogin.rst +++ b/docs/platform/howto/saml/setup-saml-onelogin.rst @@ -8,9 +8,9 @@ Prerequisite steps in Aiven Console #. In the organization, click **Admin**. -#. Select **Authentication**. +#. Select **Identity providers**. -#. Click **Add authentication method**. +#. Click **Add identity provider**. #. Enter a name and select SAML. You can also select the groups that users will be added to when they sign up or log in through this authentication method. diff --git a/docs/platform/howto/scale-services.rst b/docs/platform/howto/scale-services.rst index 3c3e7d2223..df2e99a1a8 100644 --- a/docs/platform/howto/scale-services.rst +++ b/docs/platform/howto/scale-services.rst @@ -4,15 +4,15 @@ Scale your service When creating a new Aiven service, you are not tied to a plan. Your services can be adjusted to better match your needs. Services can be moved to a higher or lower plan, and to a different tier—Startup, Business or Premium. -1. Log in to `Aiven Console `_. -2. Go to your **Services**, and select the service you want to scale. -3. On the **Overview** page of your service, scroll down to **Service plan** > **Change plan**. -4. In the **Change service plan** window, select the new service plan and new tier, if required. -5. Select **Change**. +1. Log into the `Aiven Console `_, select your project and then select the service you want to scale. +2. On the service page, click **Service settings** from the sidebar. +3. In the **Service plan** section, click **Actions (...)**. +4. From the dropdown menu, click **Change plan**. +5. In the **Change service plan** dialog, choose the new service plan and tier, if required. +6. Click **Change**. -.. topic:: Result +Your service is in the *Rebuilding* state. Once the rebuilding is over, your new service plan will be active on your service. The service is still accessible through the plan-change process. - Your service is in the *Rebuilding* state. Once the rebuilding is over, your new service plan will be active on your service. The service is still accessible through the plan-change process. .. note:: diff --git a/docs/platform/howto/search-services.rst b/docs/platform/howto/search-services.rst index 2d7c4d789b..031697db3a 100644 --- a/docs/platform/howto/search-services.rst +++ b/docs/platform/howto/search-services.rst @@ -20,13 +20,17 @@ You can also add filters to the search field yourself. The supported filters are * ``provider`` * ``region`` -You can add multiple values to filters separated by a comma. For example, this is how you would view all running PostgreSQL® services that are hosted on AWS or Google Cloud:: +You can add multiple values to filters separated by a comma. For example, this is how you would view all running PostgreSQL® services that are hosted on AWS or Google Cloud: - service:pg status:running provider:aws,google +.. code:: -You can use these filters alongside keyword searches. For example, to see all powered off Kafka® services with *production* in the name, you could use the following::: + service:pg status:running provider:aws,google - production service:kafka status:poweroff +You can use these filters alongside keyword searches. For example, to see all powered off Kafka® services with *production* in the name, you could use the following: + +.. 
code:: + + production service:kafka status:poweroff Filter by service type ~~~~~~~~~~~~~~~~~~~~~~~ @@ -107,6 +111,8 @@ To filter the services by the cloud provider they are hosted on, use the filter Filter by cloud region ~~~~~~~~~~~~~~~~~~~~~~~ -Find the supported values for the ``region`` filter in the *Cloud* column of the tables in :doc:`List of available cloud regions `. For example, to see all services in the AWS ``eu-central-1`` region, you use this filter:: +Find the supported values for the ``region`` filter in the *Cloud* column of the tables in :doc:`List of available cloud regions `. For example, to see all services in the AWS ``eu-central-1`` region, you use this filter: - region:aws-eu-central-1 +.. code:: + + region:aws-eu-central-1 diff --git a/docs/platform/howto/set-authentication-policies.rst b/docs/platform/howto/set-authentication-policies.rst new file mode 100644 index 0000000000..ee0a8e90b2 --- /dev/null +++ b/docs/platform/howto/set-authentication-policies.rst @@ -0,0 +1,42 @@ +Set authentication policies for organization users +=================================================== + +The authentication policy for your organization specifies the ways that users can access your organization on the Aiven platform: with a password, third-party authentication, or organization single sign-on (SSO). + +Authentication types +--------------------- + +Passwords and two-factor authentication +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +With password authentication enabled, users log in with their email address and password. For an added layer of security, you can enforce two-factor authentication (2FA) for password logins for all users in your organization. + +When two-factor authentication is required, users won't be able to access any resources in your organization until they set up 2FA. + +.. note:: + Authentication tokens are not affected and will continue to work when you enable two-factor authentication. + +Third-party authentication +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Users can choose to log in using Google, Microsoft, or GitHub. + +Organization identity providers (SSO) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Organization users are restricted to logging in using SSO through an :doc:`identity provider `. + +Set an authentication policy +------------------------------ + +To set an authentication policy for all users in an organization: + +#. In the organization, click **Admin**. + +#. Click **Authentication**. + +#. Click the toggle for each authentication method that you want to allow. + +#. Click **Save changes**. + + diff --git a/docs/platform/howto/static-ip-addresses.rst b/docs/platform/howto/static-ip-addresses.rst index 60603ed99c..6c0dda8d77 100644 --- a/docs/platform/howto/static-ip-addresses.rst +++ b/docs/platform/howto/static-ip-addresses.rst @@ -59,7 +59,9 @@ The command returns some information about the newly created static IP address. When the IP address has been provisioned, the state turns to ``created``. The list of static IP addresses in the current project is available using the -``static-ip list`` command:: +``static-ip list`` command: + +.. code:: avn static-ip list @@ -81,7 +83,9 @@ Associate static IP addresses with a service -------------------------------------------- Using the name of the service, and the ID of the static IP address, you can -assign which service a static IP should be used by:: +assign which service a static IP should be used by: + +.. 
code:: avn static-ip associate --service my-static-pg ip359373e5e56 avn static-ip associate --service my-static-pg ip358375b2765 diff --git a/docs/platform/howto/tag-resources.rst b/docs/platform/howto/tag-resources.rst index 303aaf1e5c..b3120defd8 100644 --- a/docs/platform/howto/tag-resources.rst +++ b/docs/platform/howto/tag-resources.rst @@ -36,7 +36,7 @@ To add tags to a project, take the following steps: #. Log in to `Aiven Console `_ and select your organization and your project from the top navigation bar. #. On the project's page, select **Settings** from the sidebar. -#. On the **Settings** page, add a key and its value in the **Billing Reference Tags** or **Project Tags** fields, and select the **+** icon to add more tags in the same manner. +#. On the **Settings** page, click **Add tag** and enter a key and its value in the **Billing Reference Tags** or **Project Tags** fields, and select the **+** icon to add more tags in the same manner. #. Select **Save changes** to save all of your tags. .. topic:: Result @@ -45,18 +45,17 @@ To add tags to a project, take the following steps: Add tags to services """""""""""""""""""" +To add tags to a service, follow these steps: -To add tags to a service, take the following steps: +1. Log in to the `Aiven Console `_ and select your organization and your project from the top navigation bar. +2. On the **Services** page of your project, select the service you wish to tag. +3. On the service page, select **Service settings** from the sidebar. +4. In the **Service status** section, click **Actions (...)**, then click **Add service tags** from the dropdown menu. +5. In the **Tag this service** dialog, enter a key and its value in the **Service Tags** fields. +6. Click **Add tag** to add additional tags. +7. Click **Save changes** to apply the tags. -#. Log in to `Aiven Console `_ and select your organization and your project from the top navigation bar. -#. On the **Services** page of your project, select the service that you want to tag. -#. On the **Overview** page of the service, go to **Service Tags** > **Tag service**. -#. In the **Tag this service** window, add a key and its value in the **Service Tags** fields, and select the **+** icon to add more tags in the same manner. -#. Select **Save changes** to save all of your tags. - -.. topic:: Result - - You can see the tags listed in the table on the **Projects** page. +You can see the tags listed in the table on the **Projects** page. Add and modify resource tags with the Aiven client @@ -65,46 +64,60 @@ Add and modify resource tags with the Aiven client Add and modify service tags """""""""""""""""""""""""""" -* Add new tags to a service:: +* Add new tags to a service: - avn service tags update your-service --add-tag business_unit=sales --add-tag env=smoke_test + .. code:: -* Modify or remove tags:: + avn service tags update your-service --add-tag business_unit=sales --add-tag env=smoke_test - avn service tags update your-service --add-tag env=production --remove-tag business_unit +* Modify or remove tags: + + .. code:: + + avn service tags update your-service --add-tag env=production --remove-tag business_unit -* List service tags:: +* List service tags: - avn service tags list your-service - KEY VALUE - === ========== - env production + .. 
code:: + + avn service tags list your-service + KEY VALUE + === ========== + env production -* Replace tags with a set of new ones, removing the old ones:: +* Replace tags with a set of new ones, removing the old ones: - avn service tags replace your-service --tag cost_center=U1345 + .. code:: + + avn service tags replace your-service --tag cost_center=U1345 - avn service tags list your-service - KEY VALUE - =========== ===== - cost_center U1345 + avn service tags list your-service + KEY VALUE + =========== ===== + cost_center U1345 Add and modify project tags """""""""""""""""""""""""""" The commands ``update``, ``list`` and ``replace`` exist for tagging projects too, and work the same way: -* Add tags to a project:: +* Add tags to a project: + + .. code:: + + avn project tags update --project your-project --add-tag business_unit=sales - avn project tags update --project your-project --add-tag business_unit=sales +* Replace project tags: + + .. code:: -* Replace project tags:: + avn project tags replace --project your-project --tag env=smoke_test - avn project tags replace --project your-project --tag env=smoke_test +* List project tags: -* List project tags:: + .. code:: - avn project tags list - KEY VALUE - === ========== - env smoke_test + avn project tags list + KEY VALUE + === ========== + env smoke_test diff --git a/docs/platform/howto/technical-emails.rst b/docs/platform/howto/technical-emails.rst index 5784ba3a3e..11b5493bd3 100644 --- a/docs/platform/howto/technical-emails.rst +++ b/docs/platform/howto/technical-emails.rst @@ -1,19 +1,37 @@ -Get technical notifications -============================ +Manage project and service notifications +========================================= -To stay up to date with the latest news about the services in a project, you can set up notifications for technical contacts for the project. If you don't add a technical email for a project, email notifications will still be sent to users with admin and operator roles. +To stay up to date with the latest information about services and projects, you can set service and project contacts to receive email notifications. Notifications include information about plan sizes, performance, outages, and scheduled maintenance. -Notifications include information about plan sizes, performance, outages and scheduled maintenance. High priority notifications (for example, a plan running out of space) are also sent to project admin users. +The contacts for a project can be set to the admin and operators of that project (this is the default), or to specific email addresses. Project contacts receive notifications about the project. They also receive the notifications for all services, unless you set a separate service contact for a service. -Set up email notifications -""""""""""""""""""""""""""" +Service contacts by default are the project contacts. However, if you set other email addresses as service contacts for a service, email notifications will only be sent to those contacts for that specific service. + +.. _set-project-contacts: + +Set project contacts +""""""""""""""""""""" #. In the project, click **Settings**. -#. In the **Technical Emails** section, add the email addresses. +#. On the **Notifications** tab, select the project contacts that you want to receive email notifications. #. Click **Save changes**. +.. _set-service-contacts: + +Set service contacts +""""""""""""""""""""" + +#. In the service, click **Service settings**. + +#. 
In the **Service status** section, open the menu in the top right and select **Change service contacts**. + +#. Select the contacts that you want to receive email notifications for this service. + +#. Click **Save**. + + Set up Slack notifications """"""""""""""""""""""""""" @@ -24,13 +42,8 @@ To get notifications in Slack, you can add a Slack channel's or DM email address .. note:: If you don't see the email integrations option, ask the owner or admin of the workspace or organization to `allow incoming emails `_. -#. In the Aiven Console, go to the project that you want to get notifications for. - -#. Click **Settings**. +#. In the `Aiven Console `_, go to the project or service that you want to get notifications for. -#. In the **Technical Emails** section, add the email address that you created for the Slack channel or DM. - - -#. Click **Save changes**. +#. Follow the instructions to set the Slack email address as a :ref:`project contact ` or :ref:`service contact `. Alternatively, you can `set up a Slackbot forwarding address `_ and use that to automatically forward Aiven's email notifications from your email client. \ No newline at end of file diff --git a/docs/platform/howto/use-aws-privatelinks.rst b/docs/platform/howto/use-aws-privatelinks.rst index fa9fa595af..15cb40405d 100644 --- a/docs/platform/howto/use-aws-privatelinks.rst +++ b/docs/platform/howto/use-aws-privatelinks.rst @@ -18,14 +18,12 @@ routing between the VPC, you can use any private IP range for the VPC, unless you also want to connect to the project VPC using VPC peering connections. This means that overlaps in the IP range are not an issue. -You can use either the `Aiven Console `__ -or the :doc:`Aiven CLI ` to set up -AWS PrivateLink. You also need the AWS console or CLI to create a VPC endpoint. +To set up AWS PrivateLink, you need to use the :doc:`Aiven CLI `. You also need the AWS console or CLI to create a VPC endpoint. **Note:** Aiven for Apache Cassandra® and Aiven for M3 services do not currently support AWS PrivateLink. -#. Create an AWS PrivateLink resource on the Aiven service: +#. Create an AWS PrivateLink resource on the Aiven service. The Amazon Resource Name (ARN) for the principals that are allowed to connect to the VPC endpoint service and the AWS network load @@ -34,56 +32,36 @@ currently support AWS PrivateLink. or a given role. Only give permissions to roles that you trust, as an allowed role can connect from any VPC. - - Using the Aiven CLI, run the following command including your AWS - account ID, the access scope, and the name of your Aiven service: + Use the Aiven CLI to run the following command including your AWS account ID, the access scope, and the name of your Aiven service: - .. code:: + .. code:: bash - avn service privatelink aws create --principal arn:aws:iam::$AWS_account_ID:$access_scope $Aiven_service_name + avn service privatelink aws create --principal arn:aws:iam::$AWS_account_ID:$access_scope $Aiven_service_name - For example: + For example: - .. code:: + .. code:: bash - avn service privatelink aws create --principal arn:aws:iam::012345678901:user/mwf my-kafka + avn service privatelink aws create --principal arn:aws:iam::012345678901:user/mwf my-kafka - - Using `Aiven Console `__: - - #. Log in to `Aiven Console `__ and select the service that you - want to use. - - #. On the **Overview** page of your service, select **Network** from the sidebar. - - #. On the **Network** page, select **Create Privatelink** . - - #. 
In the **Create Privatelink** window, enter the Amazon Resource Names (ARN) for the principals that you want to use, and select **Create** . - - This creates an AWS network load balancer dedicated to your Aiven - service and attaches it to an AWS VPC endpoint service that you can - later use to connect to your account's VPC endpoint. + This creates an AWS network load balancer dedicated to your Aiven service and attaches it to an AWS VPC endpoint service that you can later use to connect to your account's VPC endpoint. - The PrivateLink resource stays in the initial ``creating`` state - for up to a few minutes while the load balancer is being launched. - After the load balancer and VPC endpoint service have been created, - the state changes to ``active`` and the ``aws_service_id`` and - ``aws_service_name`` values are set. + The PrivateLink resource stays in the initial ``creating`` state for up to a few minutes while the load balancer is being launched. After the load balancer and VPC endpoint service have been created, the state changes to ``active`` and the ``aws_service_id`` and ``aws_service_name`` values are set. #. In the AWS CLI, run the following command to create a VPC endpoint: - .. code:: + .. code:: bash aws ec2 --region eu-west-1 create-vpc-endpoint --vpc-endpoint-type Interface --vpc-id $your_vpc_id --subnet-ids $space_separated_list_of_subnet_ids --security-group-ids $security_group_ids --service-name com.amazonaws.vpce.eu-west-1.vpce-svc-0b16e88f3b706aaf1 - - Replace the ``--service-name`` value with the value shown next to - **Network** > **AWS service name** in `Aiven Console `__ or by - running the following command in the Aiven CLI: - .. code:: + Replace the ``--service-name`` value with the value shown either in the `Aiven Console `__ > **Service settings** page > **Cloud and network** section > actions (**...**) menu > **Edit AWS PrivateLink** > **AWS service name** or as an output of the following Aiven CLI command: + + .. code:: bash avn service privatelink aws get aws_service_name - + Note that for fault tolerance, you should specify a subnet ID for each availability zone in the region. The security groups determine the instances that are allowed to connect to the endpoint network @@ -92,12 +70,12 @@ currently support AWS PrivateLink. Alternatively, you can create the VPC endpoint in `AWS Console `__ under **VPC** > **Endpoints** > **Create endpoint** . See the `AWS documentation `__ for details. .. note:: - + For Aiven for Apache Kafka® services, the security group for the VPC endpoint must allow ingress in the port range ``10000-31000`` to accommodate the pool of Kafka broker ports used in our PrivateLink implementation. - + It takes a while before the endpoint is ready to use as AWS provisions network interfaces to each of the subnets and connects them to the Aiven VPC endpoint service. Once the AWS endpoint state @@ -113,7 +91,7 @@ currently support AWS PrivateLink. ``user_config.privatelink_access.`` to ``true`` for the components that you want to enable. For example: - .. code:: + .. code:: bash avn service update -c privatelink_access.kafka=true $Aiven_service_name avn service update -c privatelink_access.kafka_connect=true $Aiven_service_name @@ -122,16 +100,14 @@ currently support AWS PrivateLink. - In `Aiven Console `__: - #. Go to the **Overview** page of your service, and scroll down to **Advanced - configuration**. - - #. Select **Change**, add the components that you - want, and switch them on. + #. 
On the **Overview** page of your service, select **Service settings** from the sidebar. + #. On the **Service settings** page, navigate to the **Cloud and network** section and select **More network configurations** from the actions (**...**) menu. + #. In the **Network configuration** window, select **Add configuration options**. In the search field, enter ``privatelink_access``. From the displayed component names, select the names of the components that you want to switch on. .. image:: /images/platform/howto/use-aws-privatelink_image1.png :alt: Aiven Console private link configuration - #. Select **Save advanced configuration** . + #. Select the toggle switches for the selected components to switch them on. Select **Save configuration**. It takes a couple of minutes before connectivity is available after you enable a service component. This is because AWS requires an AWS @@ -148,7 +124,7 @@ Acquire connection information One AWS PrivateLink connection '''''''''''''''''''''''''''''' -If you have one private endpoint connected to your Aiven service, you can preview the connection information (URI, hostname, or port required to access the service through the private endpoint) in `Aiven Console `_ > the service's **Overview** page > the **Connection information** section, where you'll also find the switch for the ``privatelink`` access route. ``privatelink``-access-route values for ``host`` and ``port`` differ from those for the ``dynamic`` access route used by default to connect to the service. +If you have one private endpoint connected to your Aiven service, you can preview the connection information (URI, hostname, or port required to access the service through the private endpoint) in `Aiven Console `__ > the service's **Overview** page > the **Connection information** section, where you'll also find the switch for the ``privatelink`` access route. ``privatelink``-access-route values for ``host`` and ``port`` differ from those for the ``dynamic`` access route used by default to connect to the service. .. note:: @@ -165,7 +141,7 @@ To acquire connection information for your service component using AWS PrivateLi * For SSL connection information for your service component using AWS PrivateLink, run the following command: - .. code-block:: bash + .. code:: bash avn service connection-info UTILITY_NAME SERVICE_NAME --privatelink-connection-id PRIVATELINK_CONNECTION_ID @@ -177,7 +153,7 @@ To acquire connection information for your service component using AWS PrivateLi * For SASL connection information for Aiven for Apache Kafka® service components using AWS PrivateLink, run the following command: - .. code-block:: bash + .. code:: bash avn service connection-info UTILITY_NAME SERVICE_NAME --privatelink-connection-id PRIVATELINK_CONNECTION_ID -a sasl @@ -201,23 +177,23 @@ allowed to connect a VPC endpoint: - Use the ``update`` command of the Aiven CLI: - .. code:: + .. code:: bash - # avn service privatelink aws update --principal arn:aws:iam::$AWS_account_ID:$access_scope $Aiven_service_name + avn service privatelink aws update --principal arn:aws:iam::$AWS_account_ID:$access_scope $Aiven_service_name - **Note:** When you add an entry, also include the ``--principal`` arguments for existing entries. + .. note:: + + When you add an entry, also include the ``--principal`` arguments for existing entries. - In `Aiven Console `__: #. Select your service from the **Services** page. - #. Select **Network** from the sidebar. - - #. In the **Network** page, select **Edit principals**. + #. 
On the **Overview** page, select **Service settings** from the sidebar. - #. Enter the principals that you want to include. + #. On the **Service settings** page, navigate to the **Cloud and network** section and select **Edit AWS PrivateLink** from the actions (**...**) menu. - #. Select **Save** . + #. In the **Edit AWS PrivateLink** window, enter the principals that you want to include in the **Principal ARNs** field and select **Save** . .. _h_8de68d5894: @@ -238,10 +214,12 @@ Deleting a privatelink connection - Using `Aiven Console `__: - #. Select **Network** from the sidebar on your service's page. + #. Select your service from the **Services** page. + + #. On the **Overview** page, select **Service settings** from the sidebar. - #. Select the trash can icon on the right of the **AWS PrivateLink** row. + #. On the **Service settings** page, navigate to the **Cloud and network** section and select **Delete AWS PrivateLink** from the actions (**...**) menu. - #. Select **Confirm** . + #. In the **Confirmation** window, select **Delete** . This deletes the AWS load balancer and VPC service endpoint. diff --git a/docs/platform/howto/use-azure-privatelink.rst b/docs/platform/howto/use-azure-privatelink.rst index 1f8c33de14..bd802588f7 100644 --- a/docs/platform/howto/use-azure-privatelink.rst +++ b/docs/platform/howto/use-azure-privatelink.rst @@ -151,11 +151,15 @@ To enable Private Link access for your service in the Aiven CLI, set ``user_conf To enable Private Link access in `Aiven Console `_: -#. Select the service that you want to enable access to. -#. On the **Overview** page of your service, in the **Advanced configuration** section, select **Change**. -#. Select **Add configuration option** > ``privatelink_access.`` for the components that you want to enable. -#. Toggle the switch next to the components to set the values to true. -#. Select **Save advanced configuration**. +#. On the **Overview** page of your service, select **Service settings** from the sidebar. +#. On the **Service settings** page, navigate to the **Cloud and network** section and select **More network configurations** from the actions (**...**) menu. +#. In the **Network configuration** window, take the following actions: + + #. Select **Add configuration options**. + #. In the search field, enter ``privatelink_access``. + #. From the displayed component names, select the names of the components that you want to enable (``privatelink_access.``). + #. Select the toggle switches for the selected components to enable them. + #. Select **Save configuration**. .. Tip:: @@ -186,9 +190,9 @@ To acquire connection information for your service component using Azure Private * For SSL connection information for your service component using Azure Private Link, run the following command: -.. code-block:: bash + .. code-block:: bash - avn service connection-info UTILITY_NAME SERVICE_NAME -p PRIVATELINK_CONNECTION_ID + avn service connection-info UTILITY_NAME SERVICE_NAME -p PRIVATELINK_CONNECTION_ID .. topic:: Where @@ -198,9 +202,9 @@ To acquire connection information for your service component using Azure Private * For SASL connection information for Aiven for Apache Kafka® service components using Azure Private Link, run the following command: -.. code-block:: bash + .. code-block:: bash - avn service connection-info UTILITY_NAME SERVICE_NAME -p PRIVATELINK_CONNECTION_ID -a sasl + avn service connection-info UTILITY_NAME SERVICE_NAME -p PRIVATELINK_CONNECTION_ID -a sasl .. 
topic:: Where diff --git a/docs/platform/howto/use-google-private-service-connect.rst b/docs/platform/howto/use-google-private-service-connect.rst index d5fd317cbf..842994b692 100644 --- a/docs/platform/howto/use-google-private-service-connect.rst +++ b/docs/platform/howto/use-google-private-service-connect.rst @@ -84,36 +84,39 @@ Step 3: Approve the created connection 1. Update the state of Private Service Connect connections for your Aiven service by running -.. code:: shell + .. code:: shell - avn service privatelink google refresh MY_SERVICE_NAME + avn service privatelink google refresh MY_SERVICE_NAME 2. Retry the following command until it returns the pending-user-approval status: -.. code:: shell + .. code:: shell - avn service privatelink google connection list MY_SERVICE_NAME + avn service privatelink google connection list MY_SERVICE_NAME -.. code:: shell + .. code:: shell - PRIVATELINK_CONNECTION_ID PSC_CONNECTION_ID STATE USER_IP_ADDRESS - ========================= ================= ===================== =============== - plc3fd852bec98 12870921937223780 pending-user-approval null + PRIVATELINK_CONNECTION_ID PSC_CONNECTION_ID STATE USER_IP_ADDRESS + ========================= ================= ===================== =============== + plc3fd852bec98 12870921937223780 pending-user-approval null -.. note:: - * PSC_CONNECTION_ID is the identifier assigned to Google for the connection, and you can use it to verify that the connection is indeed matching your Private Service Connect endpoint. - * PRIVATELINK_CONNECTION_ID is an Aiven internal identifier for the connection, which is needed in the final connection approval step. + .. note:: + + * PSC_CONNECTION_ID is the identifier assigned to Google for the connection, and you can use it to verify that the connection is indeed matching your Private Service Connect endpoint. + + * PRIVATELINK_CONNECTION_ID is an Aiven internal identifier for the connection, which is needed in the final connection approval step. 3. To enable a connection, approve it. -.. note:: - By approving the connection, you provide the IP address assigned to your PSC endpoint - whether automatically assigned or static. Aiven uses this IP address for pointing the service DNS records necessary for the clients to reach the Aiven service through the Private Service Connect connection. + .. note:: + + By approving the connection, you provide the IP address assigned to your PSC endpoint - whether automatically assigned or static. Aiven uses this IP address for pointing the service DNS records necessary for the clients to reach the Aiven service through the Private Service Connect connection. -To approve the connection, run the following approval command: + To approve the connection, run the following approval command: -.. code:: shell + .. code:: shell - avn service privatelink google connection approve MY_SERVICE_NAME --privatelink-connection-id PRIVATELINK_CONNECTION_ID --user-ip-address PSC_ENDPOINT_IP_ADDRESS + avn service privatelink google connection approve MY_SERVICE_NAME --privatelink-connection-id PRIVATELINK_CONNECTION_ID --user-ip-address PSC_ENDPOINT_IP_ADDRESS As a result, the connection initially transitions to the user-approved state. @@ -127,13 +130,13 @@ As a result, the connection initially transitions to the user-approved state. 
========================= ================= ============= =============== plc3fd852bec98 12870921937223780 user-approved 10.0.0.100 -You may be need to run the ``avn service privatelink google refresh`` command at this point since updates to service attachment accept lists are not immediately reflected in the states of returned connected endpoints. +You may need to run the ``avn service privatelink google refresh`` command at this point since updates to service attachment accept lists are not immediately reflected in the states of returned connected endpoints. .. code:: shell avn service privatelink google refresh MY_SERVICE_NAME -After establishing the connection and populating DNS records , the connection appears as ``active``. +After establishing the connection and populating DNS records, the connection appears as ``active``. .. code:: shell @@ -166,11 +169,15 @@ To enable Private Service Connect access for your service in the Aiven CLI, set To enable Private Link access in `Aiven Console `_, take the following steps: -1. Select the service that you want to enable access to. -2. On the **Overview** page of your service, in the **Advanced configuration** section, select **Change**. -3. Select **Add configuration option**, and select the ``privatelink_access.`` option for the components that you want to enable. -4. Toggle the switch next to the components to set the values to ``true``. -5. Select **Save advanced configuration**. +#. On the **Overview** page of your service, select **Service settings** from the sidebar. +#. On the **Service settings** page, navigate to the **Cloud and network** section and select **More network configurations** from the actions (**...**) menu. +#. In the **Network configuration** window, take the following actions: + + #. Select **Add configuration options**. + #. In the search field, enter ``privatelink_access``. + #. From the displayed component names, select the names of the components that you want to enable (``privatelink_access.``). + #. Select the toggle switches for the selected components to enable them. + #. Select **Save configuration**. .. Tip:: @@ -195,9 +202,9 @@ To acquire connection information for your service component using Private Servi * For SSL connection information for your service component using Private Service Connect, run the following command: -.. code-block:: bash + .. code-block:: bash - avn service connection-info UTILITY_NAME SERVICE_NAME -p PRIVATELINK_CONNECTION_ID + avn service connection-info UTILITY_NAME SERVICE_NAME -p PRIVATELINK_CONNECTION_ID .. topic:: Where @@ -207,9 +214,9 @@ To acquire connection information for your service component using Private Servi * For SASL connection information for Aiven for Apache Kafka® service components using Private Service Connect, run the following command: -.. code-block:: bash + .. code-block:: bash - avn service connection-info UTILITY_NAME SERVICE_NAME -p PRIVATELINK_CONNECTION_ID -a sasl + avn service connection-info UTILITY_NAME SERVICE_NAME -p PRIVATELINK_CONNECTION_ID -a sasl .. topic:: Where diff --git a/docs/platform/howto/vnet-peering-azure.rst b/docs/platform/howto/vnet-peering-azure.rst index 6a675188c1..91df49535e 100644 --- a/docs/platform/howto/vnet-peering-azure.rst +++ b/docs/platform/howto/vnet-peering-azure.rst @@ -48,9 +48,9 @@ Using the Azure CLI: az account clear az login -| This should open a window in your browser prompting to choose an Azure - account to log in with. 
An account with at least the **Application - administrator** role assignment will be needed for the later steps. +This should open a window in your browser prompting to choose an Azure +account to log in with. An account with at least the **Application +administrator** role assignment will be needed for the later steps. If you manage multiple Azure subscriptions, also configure the Azure CLI to default to the correct subscription for the subsequent commands. This @@ -107,8 +107,8 @@ as ``$user_app_secret`` below 5. find the id properties of your virtual network ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -This can be found in the Azure portal in "Virtual networks" -> name of -your network -> “JSON View” -> "Resource ID", or using +This can be found in the Azure portal in **Virtual networks** > name of +your network > **JSON View** > **Resource ID**, or using .. code:: @@ -117,11 +117,11 @@ your network -> “JSON View” -> "Resource ID", or using Save the ``id`` field which will be referred to as ``$user_vnet_id`` . Also grab -- the Azure Subscription ID ("Properties" -> "Subscription ID") or the +- the Azure Subscription ID (**Properties** > **Subscription ID**) or the part after ``/subscriptions/`` in the resource ID. This is referred to as ``$user_subscription_id`` -- the resource group name ("Properties" -> "Resource group") or the +- the resource group name (**Properties** > **Resource group**) or the ``resourceGroup`` field in the output. This is referred to as ``$user_resource_group`` @@ -220,8 +220,8 @@ with ~~~~~~~~~~~~~~~~~~~~~~~~~~ The ID of your AD tenant will be needed in the next step. Find it from -the Azure portal from "Azure Active Directory" -> "Properties" -> -"Directory ID" or with the Azure CLI using +the Azure portal from **Azure Active Directory** > **Properties** > +**Directory ID** or with the Azure CLI using .. code:: diff --git a/docs/platform/howto/vpc-peering-upcloud.rst b/docs/platform/howto/vpc-peering-upcloud.rst index 4900060fe3..f2fb38501e 100644 --- a/docs/platform/howto/vpc-peering-upcloud.rst +++ b/docs/platform/howto/vpc-peering-upcloud.rst @@ -150,17 +150,17 @@ To refresh the DHCP lease for a network interface, run the following commands: 1. To clear the existing DHCP lease - .. code-block:: bash + .. code-block:: bash - dhclient -r NETWORK_INTERFACE_NAME + dhclient -r NETWORK_INTERFACE_NAME 2. To request a renewal of the DHCP lease - .. code-block:: bash + .. 
code-block:: bash - dhclient NETWORK_INTERFACE_NAME + dhclient NETWORK_INTERFACE_NAME -Related reading +Related pages --------------- * :doc:`Manage Virtual Private Cloud (VPC) peering ` diff --git a/docs/platform/reference.rst b/docs/platform/reference.rst deleted file mode 100644 index aaa8baf103..0000000000 --- a/docs/platform/reference.rst +++ /dev/null @@ -1,16 +0,0 @@ -:orphan: - -Reference -========= - -Reference materials for working with the Aiven platform using the Aiven tools: - -* :doc:`Aiven Console ` - -* :doc:`Aiven CLI ` - -* :doc:`Aiven API ` - -* :doc:`Aiven Terraform Provider ` - -* :doc:`Aiven Operator for Kubernetes ` diff --git a/docs/platform/reference/eol-for-major-versions.rst b/docs/platform/reference/eol-for-major-versions.rst index 73a364bff2..e7fd0402aa 100644 --- a/docs/platform/reference/eol-for-major-versions.rst +++ b/docs/platform/reference/eol-for-major-versions.rst @@ -1,238 +1,246 @@ -EOL for major versions of Aiven Services -======================================== +End of life for major versions of Aiven services and tools +========================================================== -End of Life (EOL) refers to the deadline after which affected Aiven services are retired and will be no longer supported or maintained. +End of life (EOL) is the date after which Aiven services and tools are no longer supported or maintained. -Since August 2020, Aiven aims to follow the EOL schedule set by the -original authors and maintainers of the open source software, aka -upstream projects. Once the upstream project retires a specific version, -they do not receive security updates and critical bug fixes anymore by -the maintainers. +Aiven services +-------------- -Continued use of outdated services means that they no longer offer our -customers the level of protection their business needs. Therefore, by -following the upstream project's EOL schedule, we ensure that Aiven -services are always running on supported versions of the open source -software. +Aiven aims to follow the EOL schedule set by the original authors and maintainers of the open source software (the upstream projects). Once the upstream project retires a specific version, they do not receive security updates and critical bug fixes anymore by the maintainers. -**Version numbering** -~~~~~~~~~~~~~~~~~~~~~ +Outdated services don't offer the level of protection our customers need, so Aiven follows the upstream project's EOL schedule to ensure that Aiven services are always running on supported versions. -Aiven services inherit the upstream project's software versioning -scheme. Depending on the service, a major version can be either a single -digit (e.g. PostgreSQL® 14) or ``major.minor`` (e.g. Kafka® 3.2). The -exact version of the service is visible in `Aiven Console `_ once the -service is up and running. +Version numbering +''''''''''''''''' -Aiven for Elasticsearch ------------------------ +Aiven services inherit the upstream project's software versioning scheme. Depending on the service, a major version can be either a single digit (for example, PostgreSQL® 14) or in the format ``major.minor`` (for example, Kafka® 3.2). The exact version of the service is visible in the `Aiven Console `_ when the service is running. + +Aiven for Elasticsearch® +'''''''''''''''''''''''' Aiven for Elasticsearch major versions will reach EOL on the same date as the upstream open source project's EOL. `Aiven for OpenSearch® `_ is Aiven's service offering for Elasticsearch. -.. 
container:: intercom-interblocks-table-container - - - +-------------+---------------+--------------------+ - | **Version** | **Aiven EOL** | **Upstream EOL** | - | | | | - +-------------+---------------+--------------------+ - | 2.x | 2020-10-30 | 2018-02-28 | - +-------------+---------------+--------------------+ - | 5.x | 2020-10-30 | 2019-03-11 | - +-------------+---------------+--------------------+ - | 6.x | 2020-11-20 | 2020-11-20 | - +-------------+---------------+--------------------+ - | 7.x | 2022-03-23 | 2022-05-11 (v7.10) | - +-------------+---------------+--------------------+ ++-------------+---------------+--------------------+ +| **Version** | **Aiven EOL** | **Upstream EOL** | +| | | | ++-------------+---------------+--------------------+ +| 2.x | 2020-10-30 | 2018-02-28 | ++-------------+---------------+--------------------+ +| 5.x | 2020-10-30 | 2019-03-11 | ++-------------+---------------+--------------------+ +| 6.x | 2020-11-20 | 2020-11-20 | ++-------------+---------------+--------------------+ +| 7.x | 2022-03-23 | 2022-05-11 (v7.10) | ++-------------+---------------+--------------------+ Aiven for OpenSearch® ---------------------- - -Aiven for OpenSearch® is the open source continuation of the original Elasticsearch service. +''''''''''''''''''''' -.. container:: intercom-interblocks-table-container +Aiven for OpenSearch® is the open source continuation of the original Elasticsearch service. The EOL for Aiven for OpenSearch® is generally dependent on the upstream project. ++-------------+------------------------+--------------------+------------------+ +| **Version** | **Aiven EOL** | **Service creation | **Upstream EOL** | +| | | supported until** | | ++-------------+------------------------+--------------------+------------------+ +| 1.x | `TBA*` | `TBA*` | `TBA*` | ++-------------+------------------------+--------------------+------------------+ +| 2.x | `TBA*` | `TBA*` | `TBA*` | ++-------------+------------------------+--------------------+------------------+ - +-------------+------------------------+------------------+------------------+ - | **Version** | **Aiven EOL** | **Availability | **Upstream EOL** | - | | | end on Aiven | | - | | | Platform** | | - +-------------+------------------------+------------------+------------------+ - | 1.x | `TBA*` | `TBA*` | `TBA*` | - +-------------+------------------------+------------------+------------------+ - | 2.x | `TBA*` | `TBA*` | `TBA*` | - +-------------+------------------------+------------------+------------------+ - - `*` To be announced - -.. note:: - The EOL for Aiven for OpenSearch® is generally dependent on the upstream project. +`*` To be announced Aiven for PostgreSQL® ---------------------- +''''''''''''''''''''' Aiven for PostgreSQL® major versions will reach EOL on the same date as -the upstream open source project's EOL . - -.. 
container:: intercom-interblocks-table-container - - +-------------+---------------+------------------+------------------+ - | **Version** | **Aiven EOL** | **Availability | **Availability | - | | | end on Aiven | start on Aiven | - | | | Platform** | Platform** | - +-------------+---------------+------------------+------------------+ - | 9.5 | 2021-04-15 | 2021-01-26 | 2015-12-22 | - +-------------+---------------+------------------+------------------+ - | 9.6 | 2021-11-11 | 2021-05-11 | 2016-09-29 | - +-------------+---------------+------------------+------------------+ - | 10 | 2022-11-10 | 2022-05-10 | 2017-01-14 | - +-------------+---------------+------------------+------------------+ - | 11 | 2023-11-09 | 2023-05-09 | 2017-03-06 | - +-------------+---------------+------------------+------------------+ - | 12 | 2024-11-14 | 2024-05-14 | 2019-11-18 | - +-------------+---------------+------------------+------------------+ - | 13 | 2025-11-13 | 2025-05-13 | 2021-02-15 | - +-------------+---------------+------------------+------------------+ - | 14 | 2026-11-12 | 2026-05-12 | 2021-11-11 | - +-------------+---------------+------------------+------------------+ - | 15 | 2027-11-11 | 2027-05-12 | 2022-12-12 | - +-------------+---------------+------------------+------------------+ +the upstream open source project's EOL. + ++-------------+---------------+--------------------+--------------------+ +| **Version** | **Aiven EOL** | **Service creation | **Service creation | +| | | supported until** | supported from** | ++-------------+---------------+--------------------+--------------------+ +| 9.5 | 2021-04-15 | 2021-01-26 | 2015-12-22 | ++-------------+---------------+--------------------+--------------------+ +| 9.6 | 2021-11-11 | 2021-05-11 | 2016-09-29 | ++-------------+---------------+--------------------+--------------------+ +| 10 | 2022-11-10 | 2022-05-10 | 2017-01-14 | ++-------------+---------------+--------------------+--------------------+ +| 11 | 2023-11-09 | 2023-05-09 | 2017-03-06 | ++-------------+---------------+--------------------+--------------------+ +| 12 | 2024-11-14 | 2024-05-14 | 2019-11-18 | ++-------------+---------------+--------------------+--------------------+ +| 13 | 2025-11-13 | 2025-05-13 | 2021-02-15 | ++-------------+---------------+--------------------+--------------------+ +| 14 | 2026-11-12 | 2026-05-12 | 2021-11-11 | ++-------------+---------------+--------------------+--------------------+ +| 15 | 2027-11-11 | 2027-05-12 | 2022-12-12 | ++-------------+---------------+--------------------+--------------------+ .. _aiven-for-kafka: Aiven for Apache Kafka® ------------------------ - -Starting with v2.5, Aiven for Kafka® ``major.minor`` version will reach -EOL one year after it's made available on Aiven platform. - -.. 
container:: intercom-interblocks-table-container - - +-------------+---------------+------------------+------------------+ - | **Version** | **Aiven EOL** | **Availability | **Availability | - | | | end on Aiven | start on Aiven | - | | | Platform** | Platform** | - +-------------+---------------+------------------+------------------+ - | 1.0.x | 2021-02-01 | | 2017-11-01 | - +-------------+---------------+------------------+------------------+ - | 1.1.x | 2021-02-01 | | 2018-07-31 | - +-------------+---------------+------------------+------------------+ - | 2.0.x | 2021-02-01 | | 2018-07-30 | - +-------------+---------------+------------------+------------------+ - | 2.1.x | 2021-02-01 | | 2018-12-04 | - +-------------+---------------+------------------+------------------+ - | 2.2.x | 2021-02-01 | | 2019-04-19 | - +-------------+---------------+------------------+------------------+ - | 2.3.x | 2021-08-13 | 2021-08-13 | 2019-09-05 | - +-------------+---------------+------------------+------------------+ - | 2.4.x | 2021-08-13 | 2021-08-13 | 2019-10-21 | - +-------------+---------------+------------------+------------------+ - | 2.5.x | 2021-08-13 | 2021-08-13 | 2020-05-05 | - +-------------+---------------+------------------+------------------+ - | 2.6.x | 2021-08-13 | 2021-08-13 | 2020-08-13 | - +-------------+---------------+------------------+------------------+ - | 2.7.x | 2022-01-24 | 2021-10-21 | 2021-01-21 | - +-------------+---------------+------------------+------------------+ - | 2.8.x | 2022-06-02 | 2022-01-26 | 2021-04-26 | - +-------------+---------------+------------------+------------------+ - | 3.0 | 2022-11-22 | 2022-07-04 | 2021-11-22 | - +-------------+---------------+------------------+------------------+ - | 3.1 | 2023-02-14 | 2022-10-26 | 2022-02-14 | - +-------------+---------------+------------------+------------------+ - | 3.2 | 2023-06-27 | 2023-03-28 | 2022-06-21 | - +-------------+---------------+------------------+------------------+ - | 3.3 | 2023-12-12 | 2023-09-12 | 2022-12-23 | - +-------------+---------------+------------------+------------------+ - | 3.4 | 2024-05-13 | 2024-02-13 | 2023-05-09 | - +-------------+---------------+------------------+------------------+ - | 3.5 | 2024-07-31 | 2024-03-30 | 2023-07-31 | - +-------------+---------------+------------------+------------------+ - | 3.6 | 2024-10-18 | 2024-07-18 | 2023-10-18 | - +-------------+---------------+------------------+------------------+ +''''''''''''''''''''''' + +Starting with v2.5, Aiven for Kafka® versions will reach +EOL one year after they are made available on the Aiven platform. 
+
++-------------+---------------+--------------------+--------------------+
+| **Version** | **Aiven EOL** | **Service creation | **Service creation |
+|             |               | supported until**  | supported from**   |
++-------------+---------------+--------------------+--------------------+
+| 1.0.x       | 2021-02-01    |                    | 2017-11-01         |
++-------------+---------------+--------------------+--------------------+
+| 1.1.x       | 2021-02-01    |                    | 2018-07-31         |
++-------------+---------------+--------------------+--------------------+
+| 2.0.x       | 2021-02-01    |                    | 2018-07-30         |
++-------------+---------------+--------------------+--------------------+
+| 2.1.x       | 2021-02-01    |                    | 2018-12-04         |
++-------------+---------------+--------------------+--------------------+
+| 2.2.x       | 2021-02-01    |                    | 2019-04-19         |
++-------------+---------------+--------------------+--------------------+
+| 2.3.x       | 2021-08-13    | 2021-08-13         | 2019-09-05         |
++-------------+---------------+--------------------+--------------------+
+| 2.4.x       | 2021-08-13    | 2021-08-13         | 2019-10-21         |
++-------------+---------------+--------------------+--------------------+
+| 2.5.x       | 2021-08-13    | 2021-08-13         | 2020-05-05         |
++-------------+---------------+--------------------+--------------------+
+| 2.6.x       | 2021-08-13    | 2021-08-13         | 2020-08-13         |
++-------------+---------------+--------------------+--------------------+
+| 2.7.x       | 2022-01-24    | 2021-10-21         | 2021-01-21         |
++-------------+---------------+--------------------+--------------------+
+| 2.8.x       | 2022-06-02    | 2022-01-26         | 2021-04-26         |
++-------------+---------------+--------------------+--------------------+
+| 3.0         | 2022-11-22    | 2022-07-04         | 2021-11-22         |
++-------------+---------------+--------------------+--------------------+
+| 3.1         | 2023-02-14    | 2022-10-26         | 2022-02-14         |
++-------------+---------------+--------------------+--------------------+
+| 3.2         | 2023-06-27    | 2023-03-28         | 2022-06-21         |
++-------------+---------------+--------------------+--------------------+
+| 3.3         | 2023-12-12    | 2023-09-12         | 2022-12-23         |
++-------------+---------------+--------------------+--------------------+
+| 3.4         | 2024-05-13    | 2024-02-13         | 2023-05-09         |
++-------------+---------------+--------------------+--------------------+
+| 3.5         | 2024-07-31    | 2024-03-30         | 2023-07-31         |
++-------------+---------------+--------------------+--------------------+
+| 3.6         | 2024-10-18    | 2024-07-18         | 2023-10-18         |
++-------------+---------------+--------------------+--------------------+

 .. _h_0f2929c770:

-Aiven for Cassandra®
---------------------
-
-Starting with v4, Aiven for Cassandra® ``major`` version will reach EOL
-six months after it's made available on Aiven platform.
+Aiven for Apache Cassandra®
+'''''''''''''''''''''''''''

-.. container:: intercom-interblocks-table-container
+Starting with v4, Aiven for Cassandra® major versions will reach EOL
+six months after they are made available on the Aiven platform.
-   +-------------+---------------+------------------+------------------+
-   | **Version** | **Aiven EOL** | **Availability   | **Availability   |
-   |             |               | end on Aiven     | start on Aiven   |
-   |             |               | Platform**       | Platform**       |
-   +-------------+---------------+------------------+------------------+
-   | 3           | 2022-07-27    | 2022-04-27       | 2018-11-08       |
-   +-------------+---------------+------------------+------------------+
-   | 4           | N/A           | N/A              | 2021-12-09       |
-   +-------------+---------------+------------------+------------------+
++-------------+---------------+--------------------+--------------------+
+| **Version** | **Aiven EOL** | **Service creation | **Service creation |
+|             |               | supported until**  | supported from**   |
++-------------+---------------+--------------------+--------------------+
+| 3           | 2022-07-27    | 2022-04-27         | 2018-11-08         |
++-------------+---------------+--------------------+--------------------+
+| 4           | N/A           | N/A                | 2021-12-09         |
++-------------+---------------+--------------------+--------------------+

 Aiven for M3DB
---------------------
+''''''''''''''

-Starting from v1.5, Aiven for M3DB version will reach EOL six months after **newer major/minor version** is made available on Aiven platform.
+Starting from v1.5, Aiven for M3DB versions will reach EOL six months after newer major and minor versions are made available on the Aiven platform.

-.. container:: intercom-interblocks-table-container
++-------------+---------------+--------------------+--------------------+
+| **Version** | **Aiven EOL** | **Service creation | **Service creation |
+|             |               | supported until**  | supported from**   |
++-------------+---------------+--------------------+--------------------+
+| 1.1         | 2023-09-01    | 2023-06-01         | 2021-02-23         |
++-------------+---------------+--------------------+--------------------+
+| 1.2         | 2023-09-01    | 2023-06-01         | 2021-10-11         |
++-------------+---------------+--------------------+--------------------+
+| 1.5         | N/A           | N/A                | 2022-05-05         |
++-------------+---------------+--------------------+--------------------+

-   +-------------+---------------+------------------+------------------+
-   | **Version** | **Aiven EOL** | **Availability   | **Availability   |
-   |             |               | end on Aiven     | start on Aiven   |
-   |             |               | Platform**       | Platform**       |
-   +-------------+---------------+------------------+------------------+
-   | 1.1         | 2023-09-01    | 2023-06-01       | 2021-02-23       |
-   +-------------+---------------+------------------+------------------+
-   | 1.2         | 2023-09-01    | 2023-06-01       | 2021-10-11       |
-   +-------------+---------------+------------------+------------------+
-   | 1.5         | N/A           | N/A              | 2022-05-05       |
-   +-------------+---------------+------------------+------------------+

+EOL policy for major versions
+'''''''''''''''''''''''''''''
-Aiven service EOL policy for major versions
--------------------------------------------

+The Aiven EOL policy is applicable only for services whose major versions are controlled by the customer.
-Aiven EOL policy is applicable only for services whose major versions
-are controlled by the customer.

+It applies to both powered-on and powered-off services running the affected versions.
-It applies to both **powered-on** and **powered-off** services running
-the affected versions.

+EOL notifications
+'''''''''''''''''
-EOL notification
-~~~~~~~~~~~~~~~~

+When Aiven sets the EOL date for a service major version:
-When Aiven defines the EOL date for a service major version,

+- Customers receive an email notification along with instructions on the next steps.
-- Customers will receive an EOL email announcement along with
-  instructions on the next steps.

-- `Aiven Console `_ will also show an EOL alert for affected services.
+- The `Aiven Console `_ shows an EOL alert for affected services.

-- Email reminders will be sent to customers on a monthly cadence. On
-  the month of the EOL date, the cadence shifts to weekly reminders.
+- Email reminders are sent to customers monthly.

-Our recommendation
-------------------
+- In the month of the EOL date, reminders are sent to customers weekly.

-We **highly recommend** customers to perform the version upgrade well
-before EOL so that they can test compatibility for any breaking changes,
-plan for unforeseen issues, and migrate to the newer version at their
-own schedule. After the EOL date:
+EOL best practices
+''''''''''''''''''

-1. If the service is powered on, it's auto-upgraded to the latest version.
+It's highly recommended to perform the version upgrade well before EOL so that you can test compatibility for any breaking changes, plan for unforeseen issues, and migrate to the newer version on your own schedule. After the EOL date:
-2. If the service is powered off, it's deleted.
+1. If the service is powered on, it's automatically upgraded to the latest version.

-Aiven platform offers database forking as an efficient tool to verify
-the version upgrade so that customers can safely test compatibility
-without committing their production services to a one-way upgrade.
+2. If the service is powered off, it's deleted.

-.. Tip::
-   Navigate to the service's ``Overview`` page and scroll down until
-   you see a ``New database fork`` button. This will allow you to make a
-   separate new database service that is cloned from the current one's
-   backups.
+Aiven offers :doc:`database forking ` as an efficient tool to test the version upgrade before upgrading your production services.
+
+
+Aiven tools
+-----------
+
+Aiven offers multiple tools for interacting with the Aiven platform and services. These include the Aiven CLI, the Aiven Provider for Terraform, and the Aiven Operator for Kubernetes®.
+
+Breaking changes in the Aiven API can result in new major versions of the Aiven tools. While backwards compatibility is typically maintained, certain changes require us to deprecate older versions of the tools.
+
+Aiven CLI
+'''''''''
+
++-------------+---------------+
+| **Version** | **Aiven EOL** |
+|             |               |
++-------------+---------------+
+| 1.x         | 2023-12-11    |
++-------------+---------------+
+| 2.x         | 2023-12-11    |
++-------------+---------------+
+| 3.x         | 2023-12-11    |
++-------------+---------------+
+| 4.x         | `TBA*`        |
++-------------+---------------+
+
+Aiven Provider for Terraform
+''''''''''''''''''''''''''''
+
+Older versions will continue to work, but there will be no new features or bug fixes after the EOL date.
+ ++-------------+---------------+ +| **Version** | **Aiven EOL** | +| | | ++-------------+---------------+ +| 1.x | 2023-12-31 | ++-------------+---------------+ +| 2.x | 2023-12-31 | ++-------------+---------------+ +| 3.x | 2023-12-31 | ++-------------+---------------+ +| 4.x | `TBA*` | ++-------------+---------------+ + +Aiven Operator for Kubernetes +'''''''''''''''''''''''''''''' + ++-------------+---------------+ +| **Version** | **Aiven EOL** | +| | | ++-------------+---------------+ +| 0.x | `TBA*` | ++-------------+---------------+ diff --git a/docs/platform/reference/service-ip-address.rst b/docs/platform/reference/service-ip-address.rst index 8070b820c3..5665fea808 100644 --- a/docs/platform/reference/service-ip-address.rst +++ b/docs/platform/reference/service-ip-address.rst @@ -1,8 +1,8 @@ Default service IP address and hostname -========================================= +======================================= Default service IP address ----------------------------- +-------------------------- When a new Aiven service is created, the chosen cloud service provider will dynamically assign one or more public IP address from their connection pool. This IP address is not permanent, and with every service maintenance (in case of failover, maintenance upgrade or cloud migration) the IP address changes since Aiven creates a new node, migrates the existing data to it and then retire the old node. @@ -19,14 +19,23 @@ When a new service is being provisioned, its hostname is defined as follows: .. code:: - -.aivencloud.com + -.*.aivencloud.com -where: +where * ```` is the name of the service * ```` is the name of the project +* ``*`` is a variable component consisting of one or more levels of alphanumeric subdomains for the purpose of load balancing between DNS zones. + +.. note:: + + Second-level domain part of ``aivencloud.com`` might change to another name in the future if the domain becomes unavailable for updates. + +.. important:: + + Always use a fully-qualified domain name returned by Aiven API. Make sure your code doesn't put any constraints on the domain part or format of the returned service hostname. .. Note:: - If the ```` is too short or was recently used (e.g. if you drop and recreate a service with the same name) then the hostname format could be ``<3RANDOMLETTERS>-.aivencloud.com`` + If the ```` is too short or was recently used (for example, if you drop and recreate a service with the same name), the hostname format could be ``<3RANDOMLETTERS>-.*.aivencloud.com``. diff --git a/docs/products/cassandra/howto/connect-python.rst b/docs/products/cassandra/howto/connect-python.rst index 7215460654..c2705dde94 100644 --- a/docs/products/cassandra/howto/connect-python.rst +++ b/docs/products/cassandra/howto/connect-python.rst @@ -6,9 +6,11 @@ This example connects to an Aiven for Apache Cassandra® service from Python as Pre-requisites '''''''''''''' -Install the ``cassandra-driver`` library:: +Install the ``cassandra-driver`` library: - pip install cassandra-driver +.. code:: + + pip install cassandra-driver Variables ''''''''' diff --git a/docs/products/cassandra/howto/disable-cross-cluster-replication.rst b/docs/products/cassandra/howto/disable-cross-cluster-replication.rst index 87dc2e6def..4a44f08622 100644 --- a/docs/products/cassandra/howto/disable-cross-cluster-replication.rst +++ b/docs/products/cassandra/howto/disable-cross-cluster-replication.rst @@ -84,7 +84,7 @@ Use the :ref:`avn service terminate ` command to disa .. 
code-block:: bash - avn service terminate ccr_peer_service_name + avn service terminate --project PROJECT_NAME ccr_peer_service_name Disable CCR with API -------------------- diff --git a/docs/products/cassandra/howto/enable-cross-cluster-replication.rst b/docs/products/cassandra/howto/enable-cross-cluster-replication.rst index 345a46e10f..699ff78476 100644 --- a/docs/products/cassandra/howto/enable-cross-cluster-replication.rst +++ b/docs/products/cassandra/howto/enable-cross-cluster-replication.rst @@ -123,6 +123,7 @@ Create a new CCR service pair .. code-block:: bash avn service create \ + --project project_name \ --service-type cassandra \ --cloud cloud_region_name \ --plan service_plan_name \ @@ -138,6 +139,7 @@ Create a new CCR service pair .. code-block:: bash avn service create \ + --project project_name \ --service-type cassandra \ --cloud cloud_region_name \ --plan service_plan_name \ @@ -159,6 +161,7 @@ Use the :ref:`avn service create ` command to create a n .. code-block:: bash avn service create \ + --project project_name \ --service-type cassandra \ --cloud cloud_region_name \ --plan service_plan_name \ @@ -269,7 +272,7 @@ What's next * :doc:`Manage CCR on Aiven for Apache Cassandra ` * :doc:`Disable CCR on Aiven for Apache Cassandra ` -Related reading +Related pages --------------- * :doc:`About cross-cluster replication on Aiven for Apache Cassandra ` diff --git a/docs/products/cassandra/howto/manage-cross-cluster-replication.rst b/docs/products/cassandra/howto/manage-cross-cluster-replication.rst index d76571bbe6..dc791e2fd9 100644 --- a/docs/products/cassandra/howto/manage-cross-cluster-replication.rst +++ b/docs/products/cassandra/howto/manage-cross-cluster-replication.rst @@ -32,18 +32,19 @@ Change the service plan .. note:: - It's recommended to use the `Aiven console `_ for changing the plan for a CCR-enabled service. + It's recommended to use `Aiven Console `_ for changing the plan for a CCR-enabled service. 1. Log in to `Aiven Console `_. 2. From the **Services** page, select a CCR-enabled Aiven for Apache Cassandra service that you want to update. -3. In the **Overview** page of your service, navigate to **Service plan** and select **Change plan**. -4. In the **Change service plan** view, select a new plan you want to use for your service. +3. On the **Overview** page of your service, select **Service settings** from the sidebar. +4. On the **Service settings** page of your service, navigate to the **Service plan** section, and select **Change plan** from the **Actions** (**...**) menu. +5. In the **Change service plan** window, select a new plan you want to use for your service. .. tip:: You can also add extra disk space for your service by using the slider in the **Additional disk storage** section. -5. Select **Change**. +6. Select **Change**. .. topic:: Result @@ -62,14 +63,19 @@ Add an extra disk space 1. Log in to `Aiven Console `_. 2. From the **Services** page, select a CCR-enabled Aiven for Apache Cassandra service that you want to update. -3. In the **Overview** page of your service, navigate to the **Service plan** section and select **Add storage**. -4. In the **Upgrade service storage** view, use the slider to add extra disk space for your service. +3. On the **Overview** page of your service, select **Service settings** from the sidebar. +4. On the **Service settings** page of your service, navigate to the **Service plan** section, and select **Add additional storage** from the **Actions** (**...**) menu. +5. 
In the **Upgrade service storage** window, use the slider to add extra disk space for your service. .. tip:: You can also change your service plan by selecting **Change plan** in the **Your current plan** section. -You've added extra disk storage space for your CCR-enabled service and its CCR-replica service. +6. Select **Save changes**. + +.. topic:: Result + + You've added extra disk storage space for your CCR-enabled service and its CCR-replica service. .. _set-up-replication-factor: @@ -164,9 +170,9 @@ In a client library To configure the consistency level in a client library, add an extra parameter or object to define the consistency level on your software component before running a particular query. -.. topic:: Example:: - - In Python, you can specify ``consistency_level`` as a parameter for the ``SimpleStatement`` object. +.. topic:: Example: + + In Python, you can specify ``consistency_level`` as a parameter for the ``SimpleStatement`` object. .. code-block:: bash diff --git a/docs/products/cassandra/howto/use-nosqlbench-with-cassandra.rst b/docs/products/cassandra/howto/use-nosqlbench-with-cassandra.rst index f3eacecbc3..6bc466f9a2 100644 --- a/docs/products/cassandra/howto/use-nosqlbench-with-cassandra.rst +++ b/docs/products/cassandra/howto/use-nosqlbench-with-cassandra.rst @@ -66,7 +66,9 @@ The following parameters are used: * ``workload``: option calls a specific workload file that is compiled inside the ``nb`` executable and instructs ``nb`` to generate key/value pairs for a table called ``baselines.keyvalue``. You can read more on how to define custom workloads in the :ref:`dedicated documentation ` * ``phase``: refers to a specific point in the ``workload`` definition file and specifies the particular ``activity`` to run. In the example, the phase is ``schema`` which means that the nosqlbench will create the schema of the Cassandra keyspace. -To create client connections and produce data in the keyspace and tables created, you need to run the following command line, after substituting the placeholders for ``HOST``, ``PORT``, ``PASSWORD`` and ``SSL_CERTFILE``:: +To create client connections and produce data in the keyspace and tables created, you need to run the following command line, after substituting the placeholders for ``HOST``, ``PORT``, ``PASSWORD`` and ``SSL_CERTFILE``: + +.. code:: ./nb run \ host=HOST \ diff --git a/docs/products/cassandra/howto/zdm-proxy.rst b/docs/products/cassandra/howto/zdm-proxy.rst index 34512e7e24..cf35338de0 100644 --- a/docs/products/cassandra/howto/zdm-proxy.rst +++ b/docs/products/cassandra/howto/zdm-proxy.rst @@ -246,7 +246,7 @@ You can expect to receive output similar to the following: ``50`` and ``48`` are there in the target table since ZDM Proxy has forwarded the write request to the target service. ``42``, ``44``, and ``46`` are not there since ZDM Proxy has not sent the read request to the target service. -Related reading +Related pages --------------- * `zdm-proxy GitHub `_ diff --git a/docs/products/cassandra/overview.rst b/docs/products/cassandra/overview.rst index 90b7184d03..fa8b107adb 100644 --- a/docs/products/cassandra/overview.rst +++ b/docs/products/cassandra/overview.rst @@ -20,4 +20,4 @@ It is a truly distributed database where the individual nodes can communicate wi Get started with Aiven for Apache Cassandra ------------------------------------------- -Take your first steps with Aiven for Apache Cassandra® by following :doc:`Get started with Aiven for Apache Cassandra `. 
\ No newline at end of file +Take your first steps with Aiven for Apache Cassandra® by following :doc:`Get started with Aiven for Apache Cassandra `. diff --git a/docs/products/cassandra/reference.rst b/docs/products/cassandra/reference.rst index e7a9c4548d..752040f9fe 100644 --- a/docs/products/cassandra/reference.rst +++ b/docs/products/cassandra/reference.rst @@ -10,3 +10,7 @@ Aiven for Apache Cassandra® reference .. grid-item-card:: :doc:`Aiven for Apache Cassandra metrics available via Prometheus ` :shadow: md :margin: 2 2 0 0 + + .. grid-item-card:: :doc:`Aiven for Apache Cassandra metrics available via Datadog ` + :shadow: md + :margin: 2 2 0 0 diff --git a/docs/products/cassandra/reference/cassandra-metrics-datadog.rst b/docs/products/cassandra/reference/cassandra-metrics-datadog.rst new file mode 100644 index 0000000000..fcb998cbe2 --- /dev/null +++ b/docs/products/cassandra/reference/cassandra-metrics-datadog.rst @@ -0,0 +1,15 @@ +Aiven for Apache Cassandra® metrics available via Datadog +========================================================= + +Learn what metrics are available via Datadog for Aiven for Apache Cassandra® services. + +Get a metrics list for your service +----------------------------------- + +The list of Aiven for Apache Cassandra metrics available in Datadog corresponds to the list of metrics available for the open-source Apache Cassandra and can be checked in `Metrics `_. + +Related pages +--------------- + +* Check how to use Datadog with Aiven services in :doc:`Datadog and Aiven `. +* Check how to send metrics to Datadog from Aiven services in :doc:`Send metrics to Datadog `. diff --git a/docs/products/clickhouse.rst b/docs/products/clickhouse.rst index 5667e0b532..cf520b0017 100644 --- a/docs/products/clickhouse.rst +++ b/docs/products/clickhouse.rst @@ -7,7 +7,7 @@ Aiven for ClickHouse® is a fully managed distributed columnar database based on .. grid:: 1 2 2 2 - .. grid-item-card:: :doc:`Quickstart ` + .. grid-item-card:: :doc:`Quickstart ` :shadow: md :margin: 2 2 0 0 diff --git a/docs/products/clickhouse/concepts/clickhouse-tiered-storage.rst b/docs/products/clickhouse/concepts/clickhouse-tiered-storage.rst index 2d5becf515..fe786cfed5 100644 --- a/docs/products/clickhouse/concepts/clickhouse-tiered-storage.rst +++ b/docs/products/clickhouse/concepts/clickhouse-tiered-storage.rst @@ -76,7 +76,7 @@ What's next * :doc:`Enable tiered storage in Aiven for ClickHouse ` * :doc:`Configure data retention thresholds for tiered storage ` -Related reading +Related pages --------------- * :doc:`Check data volume distribution between different disks ` diff --git a/docs/products/clickhouse/concepts/disaster-recovery.rst b/docs/products/clickhouse/concepts/disaster-recovery.rst index 82e344fcea..519c629be7 100644 --- a/docs/products/clickhouse/concepts/disaster-recovery.rst +++ b/docs/products/clickhouse/concepts/disaster-recovery.rst @@ -12,7 +12,7 @@ With Aiven, HA for your service is supported in business and premium plans. See .. seealso:: - :ref:`Cross-availability-zone data distribution ` + :ref:`Cross-availability-zone data distribution ` .. _backup-and-restore: @@ -73,7 +73,7 @@ Aiven for ClickHouse has a few restrictions on the disaster recovery capability. For all the restrictions and limits for Aiven for ClickHouse, see :doc:`Aiven for ClickHouse limits and limitations `. 
-Related reading +Related pages --------------- * :doc:`Disaster Recovery testing scenarios ` diff --git a/docs/products/clickhouse/concepts/features-overview.rst b/docs/products/clickhouse/concepts/features-overview.rst index 670e0e12e6..30f5009a00 100644 --- a/docs/products/clickhouse/concepts/features-overview.rst +++ b/docs/products/clickhouse/concepts/features-overview.rst @@ -13,69 +13,44 @@ Effortless setup With the managed ClickHouse service, you can offload on Aiven multiple time-consuming and laborious operations on your data infrastructure: database initialization and configuration, cluster provisioning and management, or your infrastructure maintenance and monitoring are off your shoulders. -Pre-configured settings - The managed ClickHouse service is pre-configured with a rational set of parameters and settings appropriate for the plan you have selected. You can easily launch production-ready ClickHouse clusters in minutes in a cloud of your choice. +**Pre-configured settings:** The managed ClickHouse service is pre-configured with a rational set of parameters and settings appropriate for the plan you have selected. You can easily launch production-ready ClickHouse clusters in minutes in a cloud of your choice. Easy management --------------- -Scalability - You can seamlessly :doc:`scale your ClickHouse cluster ` horizontally or vertically as your data and needs change using the pre-packaged plans. Aiven for ClickHouse also supports :doc:`sharding ` as a horizontal cluster scaling strategy. - -Resource tags - You can assign metadata to your services in the form of tags. They help you organize, search, and filter Aiven resources. You can :doc:`tag your service ` by purpose, owner, environment, or any other criteria. - -Forking - Forking an Aiven for ClickHouse service creates a new database service containing the latest snapshot of an existing service. Forks don't stay up-to-date with the parent database, but you can write to them. It provides a risk-free way of working with your production data and schema. For example, you can use them to test upgrades, new schema migrations, or load test your app with a different plan. Learn how to :doc:`fork an Aiven service `. +- **Scalability:** You can seamlessly :doc:`scale your ClickHouse cluster ` horizontally or vertically as your data and needs change using the pre-packaged plans. Aiven for ClickHouse also supports :doc:`sharding ` as a horizontal cluster scaling strategy. +- **Resource tags:** You can assign metadata to your services in the form of tags. They help you organize, search, and filter Aiven resources. You can :doc:`tag your service ` by purpose, owner, environment, or any other criteria. +- **Forking:** Forking an Aiven for ClickHouse service creates a new database service containing the latest snapshot of an existing service. Forks don't stay up-to-date with the parent database, but you can write to them. It provides a risk-free way of working with your production data and schema. For example, you can use them to test upgrades, new schema migrations, or load test your app with a different plan. Learn how to :doc:`fork an Aiven service `. Effective maintenance --------------------- -Automatic maintenance updates - With 99.99% SLA, Aiven makes sure that the ClickHouse software and the underlying platform stays up-to-date with the latest patches and updates with zero downtime. You can set :doc:`maintenance windows ` for your service to make sure the changes occur during times that do not affect productivity. 
- -Backups and disaster recovery - Aiven for ClickHouse has automatic backups taken every 24 hours. The retention period depends on your plan tier. Check out the details on `Plan comparison `_. +- **Automatic maintenance updates:** With 99.99% SLA, Aiven makes sure that the ClickHouse software and the underlying platform stays up-to-date with the latest patches and updates with zero downtime. You can set :doc:`maintenance windows ` for your service to make sure the changes occur during times that do not affect productivity. +- **Backups and disaster recovery:** Aiven for ClickHouse has automatic backups taken every 24 hours. The retention period depends on your plan tier. Check out the details on `Plan comparison `_. Intelligent observability ------------------------- -Service health monitoring - Aiven for ClickHouse provides metrics and logs for your cluster at no additional charge. You can enable pre-integrated Aiven observability services, such as Grafana®, M3®, or OpenSearch® or push available metrics and logs to external observability tools, such as Prometheus, AWS CloudWatch, or Google Cloud Logging. For more details, see :doc:`Monitor Aiven for ClickHouse metrics `. - -Notifications and alerts - The service is pre-configured to alert you on, for example, your disk running out of space or CPU consumption running high when resource usage thresholds are exceeded. Email notifications are sent to admins and technical contacts of the project under which your service is created. Check :doc:`Receive technical notifications ` to learn how you can sign up for such alerts. +- **Service health monitoring:** Aiven for ClickHouse provides metrics and logs for your cluster at no additional charge. You can enable pre-integrated Aiven observability services, such as Grafana®, M3®, or OpenSearch® or push available metrics and logs to external observability tools, such as Prometheus, AWS CloudWatch, or Google Cloud Logging. For more details, see :doc:`Monitor Aiven for ClickHouse metrics `. +- **Notifications and alerts:** The service is pre-configured to alert you on, for example, your disk running out of space or CPU consumption running high when resource usage thresholds are exceeded. Email notifications are sent to admins and technical contacts of the project under which your service is created. Check :doc:`Receive technical notifications ` to learn how you can sign up for such alerts. Security and compliance ----------------------- -Single tenancy - Your service runs on dedicated instances, thus offering true data isolation that contributes to the optimal protection and an increased security. - -Network isolation - Aiven platform supports VPC peering as a mechanism for connecting directly to your ClickHouse service via private IP, thus providing a more secure network setup. The platform also supports PrivateLink connectivity. - -Regulatory compliance - ClickHouse runs on Aiven platform that is ISO 27001:2013, SOC2, GDPR, HIPAA, and PCI/DSS compliant. +- **Single tenancy:** Your service runs on dedicated instances, thus offering true data isolation that contributes to the optimal protection and an increased security. +- **Network isolation:** Aiven platform supports VPC peering as a mechanism for connecting directly to your ClickHouse service via private IP, thus providing a more secure network setup. The platform also supports PrivateLink connectivity. +- **Regulatory compliance:** ClickHouse runs on Aiven platform that is ISO 27001:2013, SOC2, GDPR, HIPAA, and PCI/DSS compliant. 
Role based Access Control (RBAC) To learn what kind of granular access is possible in Aiven for ClickHouse, check out :ref:`RBAC with Zookeeper `. -Zero lock-in - Aiven for ClickHouse offers compatibility with open source software (OSS), which protects you from software and vendor lock-in. You can easily migrate between clouds and regions. +- **Zero lock-in:** Aiven for ClickHouse offers compatibility with open source software (OSS), which protects you from software and vendor lock-in. You can easily migrate between clouds and regions. -.. seealso:: - - Check out more details on security and compliance in Aiven for ClickHouse in :doc:`Secure a managed ClickHouse® service `. +Check out more details on security and compliance in Aiven for ClickHouse in :doc:`Secure a managed ClickHouse® service `. Devops-friendly tools --------------------- -Automation - `Aiven Provider for Terraform `_ helps you automate the orchestration of your ClickHouse clusters. - -Command-line tooling - :doc:`Aiven CLI ` client provides greater flexibility of use for proficient administrators allowing scripting repetitive actions with ease. - -REST APIs - :doc:`Aiven APIs ` allow you to manage Aiven resources in a programmatic way using HTTP requests. The whole functionality available via Aiven Console is also available via APIs enabling you to build custom integrations with ClickHouse and the Aiven platform. +- **Automation:** `Aiven Provider for Terraform `_ helps you automate the orchestration of your ClickHouse clusters. +- **Command-line tooling:** :doc:`Aiven CLI ` client provides greater flexibility of use for proficient administrators allowing scripting repetitive actions with ease. +- **REST APIs:** :doc:`Aiven APIs ` allow you to manage Aiven resources in a programmatic way using HTTP requests. The whole functionality available via Aiven Console is also available via APIs enabling you to build custom integrations with ClickHouse and the Aiven platform. diff --git a/docs/products/clickhouse/concepts/federated-queries.rst b/docs/products/clickhouse/concepts/federated-queries.rst index def31557bc..57c42d1ba3 100644 --- a/docs/products/clickhouse/concepts/federated-queries.rst +++ b/docs/products/clickhouse/concepts/federated-queries.rst @@ -46,7 +46,7 @@ Limitations * Federated queries in Aiven for ClickHouse only support S3-compatible object storage providers for the time being. More external data sources coming soon! * Virtual tables are only supported for URL sources, using the URL table engine. Stay tuned for us supporting the S3 table engine in the future! -Related reading +Related pages --------------- * :doc:`Read and pull data from S3 object storages and web resources over HTTP ` diff --git a/docs/products/clickhouse/concepts/strings.rst b/docs/products/clickhouse/concepts/strings.rst index 8a137d489e..d5ac0f638e 100644 --- a/docs/products/clickhouse/concepts/strings.rst +++ b/docs/products/clickhouse/concepts/strings.rst @@ -41,11 +41,6 @@ ClickHouse supports a wide range of functions for working with JSON. With specif .. topic:: Examples - ``visitParamExtractString(params, name)`` - Parse the string in double quotes. - - ``JSONExtractString(json[, indices_or_keys]…)`` - Parse a JSON and extract a string. - - ``toJSONString`` - Convert a value of any data type to its JSON representation. + - ``visitParamExtractString(params, name)``: Parse the string in double quotes. + - ``JSONExtractString(json[, indices_or_keys]…)``: Parse a JSON and extract a string. 
+ - ``toJSONString``: Convert a value of any data type to its JSON representation. diff --git a/docs/products/clickhouse/getting-started.rst b/docs/products/clickhouse/get-started.rst similarity index 96% rename from docs/products/clickhouse/getting-started.rst rename to docs/products/clickhouse/get-started.rst index be2f32913d..37f689e9b0 100644 --- a/docs/products/clickhouse/getting-started.rst +++ b/docs/products/clickhouse/get-started.rst @@ -22,9 +22,9 @@ Create a database 2. In the **Databases and tables** page, select **Create database** > **ClickHouse database**. 3. In the **Create ClickHouse database** window, enter a name for your database and select **Create database**. -.. note:: + .. note:: - All databases must be created through the web console. + All databases must be created through the web console. Connect to ClickHouse --------------------- diff --git a/docs/products/clickhouse/howto/check-data-tiered-storage.rst b/docs/products/clickhouse/howto/check-data-tiered-storage.rst index fa031fa39c..7fa7b1ad79 100644 --- a/docs/products/clickhouse/howto/check-data-tiered-storage.rst +++ b/docs/products/clickhouse/howto/check-data-tiered-storage.rst @@ -34,26 +34,26 @@ Run a data distribution check with the ClickHouse client (CLI) .. code-block:: bash - SELECT - database, - table, - disk_name, - formatReadableSize(sum(data_compressed_bytes)) AS total_size, - count(*) AS parts_count, - formatReadableSize(min(data_compressed_bytes)) AS min_part_size, - formatReadableSize(median(data_compressed_bytes)) AS median_part_size, - formatReadableSize(max(data_compressed_bytes)) AS max_part_size - FROM system.parts - GROUP BY - database, - table, - disk_name - ORDER BY - database ASC, - table ASC, - disk_name ASC - - You can expect to receive the following output: + SELECT + database, + table, + disk_name, + formatReadableSize(sum(data_compressed_bytes)) AS total_size, + count(*) AS parts_count, + formatReadableSize(min(data_compressed_bytes)) AS min_part_size, + formatReadableSize(median(data_compressed_bytes)) AS median_part_size, + formatReadableSize(max(data_compressed_bytes)) AS max_part_size + FROM system.parts + GROUP BY + database, + table, + disk_name + ORDER BY + database ASC, + table ASC, + disk_name ASC + + You can expect to receive the following output: .. code-block:: bash @@ -63,9 +63,8 @@ Run a data distribution check with the ClickHouse client (CLI) │ system │ query_log │ default │ 75.85 MiB │ 102 │ 7.51 KiB │ 12.36 KiB │ 1.55 MiB │ └──────────┴───────────┴───────────┴────────────┴─────────────┴───────────────┴──────────────────┴───────────────┘ -.. topic:: Result - The query returns a table with data distribution details for all databases and tables that belong to your service: the storage device they use, their total sizes as well as parts counts and sizing. +The query returns a table with data distribution details for all databases and tables that belong to your service: the storage device they use, their total sizes as well as parts counts and sizing. 
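+If you prefer to run the same check programmatically rather than from the ClickHouse CLI, the following is a
+minimal sketch using the third-party ``clickhouse-connect`` Python package. The package choice, the ``avnadmin``
+user, and the host, port, and password values are assumptions of this example; replace the placeholders with the
+values from the **Connection information** section of your service.
+
+.. code-block:: python
+
+   import clickhouse_connect
+
+   # Placeholders: copy these values from your service's Connection information.
+   client = clickhouse_connect.get_client(
+       host="CLICKHOUSE_HTTPS_HOST",
+       port=12345,                       # the HTTPS port of your service
+       username="avnadmin",              # default admin user; adjust if needed
+       password="CLICKHOUSE_PASSWORD",
+       secure=True,
+   )
+
+   # A shortened version of the data distribution query shown above.
+   rows = client.query(
+       """
+       SELECT database, table, disk_name,
+              formatReadableSize(sum(data_compressed_bytes)) AS total_size
+       FROM system.parts
+       GROUP BY database, table, disk_name
+       ORDER BY database, table, disk_name
+       """
+   ).result_rows
+
+   for row in rows:
+       print(row)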
What's next ----------- @@ -73,7 +72,7 @@ What's next * :doc:`Transfer data between SSD and object storage ` * :doc:`Configure data retention thresholds for tiered storage ` -Related reading +Related pages --------------- * :doc:`About tiered storage in Aiven for ClickHouse ` diff --git a/docs/products/clickhouse/howto/configure-tiered-storage.rst b/docs/products/clickhouse/howto/configure-tiered-storage.rst index a983a41aaf..6071e9e423 100644 --- a/docs/products/clickhouse/howto/configure-tiered-storage.rst +++ b/docs/products/clickhouse/howto/configure-tiered-storage.rst @@ -83,7 +83,7 @@ What's next * :doc:`Check data volume distribution between different disks ` -Related reading +Related pages --------------- * :doc:`About tiered storage in Aiven for ClickHouse ` diff --git a/docs/products/clickhouse/howto/connect-with-clickhouse-cli.rst b/docs/products/clickhouse/howto/connect-with-clickhouse-cli.rst index 29486d0a50..d6b1b3a15a 100644 --- a/docs/products/clickhouse/howto/connect-with-clickhouse-cli.rst +++ b/docs/products/clickhouse/howto/connect-with-clickhouse-cli.rst @@ -15,7 +15,7 @@ To use the ClickHouse® client across different operating systems, we recommend Connection properties --------------------- -You will need to know the following properties to establish a secure connection with your Aiven for ClickHouse service: **Host**, **Port**, **User** and **Password**. You will find these in the *Connection information* section in the *Overview* page of your service in the `Aiven web console `_. +You will need to know the following properties to establish a secure connection with your Aiven for ClickHouse service: **Host**, **Port**, **User** and **Password**. You will find these in the **Connection information** section on the **Overview** page of your service in the `Aiven web console `_. Command template ---------------- diff --git a/docs/products/clickhouse/howto/connect-with-java.rst b/docs/products/clickhouse/howto/connect-with-java.rst index 38db92a48d..fe47cffa82 100644 --- a/docs/products/clickhouse/howto/connect-with-java.rst +++ b/docs/products/clickhouse/howto/connect-with-java.rst @@ -45,33 +45,33 @@ Connect to the service 2. Replace ``CLICKHOUSE_HTTPS_HOST`` and ``CLICKHOUSE_HTTPS_PORT`` in the command with your connection values and run the code. -.. code-block:: shell + .. code-block:: shell - jdbc:ch://CLICKHOUSE_HTTPS_HOST:CLICKHOUSE_HTTPS_PORT?ssl=true&sslmode=STRICT + jdbc:ch://CLICKHOUSE_HTTPS_HOST:CLICKHOUSE_HTTPS_PORT?ssl=true&sslmode=STRICT 3. Replace ``CLICKHOUSE_USER`` and ``CLICKHOUSE_PASSWORD`` in the code with meaningful data and run the code. -.. code-block:: java - - import com.clickhouse.jdbc.ClickHouseConnection; - import com.clickhouse.jdbc.ClickHouseDataSource; - - import java.sql.ResultSet; - import java.sql.SQLException; - import java.sql.Statement; - - public class Main { - public static void main(String[] args) throws SQLException { - String connString = "jdbc:ch://CLICKHOUSE_HTTPS_HOST:CLICKHOUSE_HTTPS_PORT?ssl=true&sslmode=STRICT"; - ClickHouseDataSource database = new ClickHouseDataSource(connString); - ClickHouseConnection connection = database.getConnection("CLICKHOUSE_USER", "CLICKHOUSE_PASSWORD"); - Statement statement = connection.createStatement(); - ResultSet result_set = statement.executeQuery("SELECT 1 AS one"); - while (result_set.next()) { - System.out.println(result_set.getInt("one")); + .. 
code-block:: java + + import com.clickhouse.jdbc.ClickHouseConnection; + import com.clickhouse.jdbc.ClickHouseDataSource; + + import java.sql.ResultSet; + import java.sql.SQLException; + import java.sql.Statement; + + public class Main { + public static void main(String[] args) throws SQLException { + String connString = "jdbc:ch://CLICKHOUSE_HTTPS_HOST:CLICKHOUSE_HTTPS_PORT?ssl=true&sslmode=STRICT"; + ClickHouseDataSource database = new ClickHouseDataSource(connString); + ClickHouseConnection connection = database.getConnection("CLICKHOUSE_USER", "CLICKHOUSE_PASSWORD"); + Statement statement = connection.createStatement(); + ResultSet result_set = statement.executeQuery("SELECT 1 AS one"); + while (result_set.next()) { + System.out.println(result_set.getInt("one")); + } } } - } .. topic:: Expected result diff --git a/docs/products/clickhouse/howto/data-service-integration.rst b/docs/products/clickhouse/howto/data-service-integration.rst index 9c874a0914..9af191cc0a 100644 --- a/docs/products/clickhouse/howto/data-service-integration.rst +++ b/docs/products/clickhouse/howto/data-service-integration.rst @@ -36,47 +36,43 @@ Prerequisites Create data service integrations -------------------------------- -1. Log in to the `Aiven web console `_. -2. In the **Services** page, select an Aiven for ClickHouse service you want to integrate with a data service. -3. Select **Get started** from the **Integrate your Aiven for ClickHouse** section in the **Overview** page of your service. - -4. In the **Data service integrations** wizard, select one of the following options: - -* To create a new service and integrate it, make sure the checkboxes for both service types are unchecked. - - .. dropdown:: Expand for next steps - - 1. In the **Data service integrations** view, select **Create service**. - 2. :doc:`Set up the new service `. - 3. Come back to your primary service and create an integration to the newly-created service. For that purpose, skip the steps that follow and start over with building your integration using this instruction but now follow the part on :ref:`integrating with an existing service `. - - or - -.. _integrate-existing-service: - -* To create an integration with an existing service, select a type of service you want to integrate with (Aiven for Apache Kafka or Aiven for PostgreSQL). - - .. dropdown:: Expand for next steps +#. Log in to the `Aiven web console `_. +#. In the **Services** page, select an Aiven for ClickHouse service you want to integrate with a data service. +#. Select **Get started** from the **Integrate your Aiven for ClickHouse** section in the **Overview** page of your service. +#. In the **Data service integrations** wizard, select one of the following options: + + **Option 1: Create a new service and integrate it** + + To create an integration with a **new service**: + + #. Make sure the checkboxes for both service types are unchecked. + #. In the **Data service integrations** view, select **Create service**. + #. :doc:`Set up the new service `. + #. Come back to your primary service and create an integration to the newly-created service. + For that purpose, skip the steps that follow and start over with building your integration using this + instruction but now follow the steps below about **integrating with an existing service**. - 1. Select a service of the chosen type from the list of services available for integration. - 2. Select **Continue** and proceed to the :ref:`database setup part `. + **Option 2: Use an existing service and integrate it** -.. 
_integration-db: + To create an integration with an **existing service**: + + #. Select a service of the chosen type from the list of services available for integration. + #. Select **Continue** and proceed to the next step to integrate the database. -5. In the **Integration databases** view, select either **Enable without databases** or **Add databases** depending on whether you want to enable your integration with databases. +#. In the **Integration databases** view, select either **Enable without databases** or **Add databases** depending on whether you want to enable your integration with databases: - .. dropdown:: Expand for enabling your integration with databases + - To enable your integration **with** databases: - 1. In the **Integration databases** view, select **Add databases**. - 2. In the **Add integration databases** section, enter database names and schema names and select **Enable** when ready. + #. In the **Integration databases** view, select **Add databases**. + #. In the **Add integration databases** section, enter database names and schema names and select **Enable** when ready. - You can preview the created databases by selecting **Databases tables** from the sidebar. + You can preview the created databases by selecting **Databases tables** from the sidebar. - .. dropdown:: Expand for enabling your integration without databases + - To enable your integration **without** databases - In the **Integration databases** view, select **Enable without databases**. + #. In the **Integration databases** view, select **Enable without databases**. - You can preview the created integration by selecting **Overview** from the sidebar. + You can preview the created integration by selecting **Overview** from the sidebar. View data service integrations ------------------------------ @@ -102,7 +98,7 @@ Stop data service integrations Your integration has been removed along with all the corresponding databases and configuration information. -Related reading +Related pages --------------- * :doc:`Manage Aiven for ClickHouse® integration databases ` diff --git a/docs/products/clickhouse/howto/enable-tiered-storage.rst b/docs/products/clickhouse/howto/enable-tiered-storage.rst index 4440528b62..7c74259e43 100644 --- a/docs/products/clickhouse/howto/enable-tiered-storage.rst +++ b/docs/products/clickhouse/howto/enable-tiered-storage.rst @@ -34,7 +34,7 @@ Prerequisites * You have an Aiven organization and at least one project. * You have a command line tool (:doc:`ClickHouse client `) installed. -* All maintenance updates are applied on your service (check on the **Overview** page of your service in Aiven Console). +* All maintenance updates are applied on your service (check in Aiven Console: your service's page > **Service settings** > **Service management** > **Maintenance updates**). 
Enable tiered storage on a project ---------------------------------- @@ -70,7 +70,7 @@ What's next * :doc:`Configure data retention thresholds for tiered storage ` * :doc:`Check data volume distribution between different disks ` -Related reading +Related pages --------------- * :doc:`About tiered storage in Aiven for ClickHouse ` diff --git a/docs/products/clickhouse/howto/integrate-kafka.rst b/docs/products/clickhouse/howto/integrate-kafka.rst index 042c0ca96d..5fe862e98e 100644 --- a/docs/products/clickhouse/howto/integrate-kafka.rst +++ b/docs/products/clickhouse/howto/integrate-kafka.rst @@ -45,7 +45,7 @@ Create an integration To connect Aiven for ClickHouse and Aiven for Apache Kafka by enabling a data service integration, see :ref:`Create data service integrations `. -The newly created database name has the following format: `service_KAFKA_SERVICE_NAME`, where KAFKA_SERVICE_NAME is the name of your Apache Kafka service. +The newly created database name has the following format: ``service_KAFKA_SERVICE_NAME``, where KAFKA_SERVICE_NAME is the name of your Apache Kafka service. .. note:: @@ -185,7 +185,7 @@ Follow these instructions: .. code:: avn service integration-list \ - --project PROJECT \ + --project PROJECT_NAME \ CLICKHOUSE_SERVICE_NAME | grep KAFKA_SERVICE_NAME 2. Update the configuration settings using the service integration id retrieved in the previous step and your integration settings. Replace ``SERVICE_INTEGRATION_ID``, ``CONNECTOR_TABLE_NAME``, ``DATA_FORMAT`` and ``CONSUMER_NAME`` with your values: @@ -193,7 +193,7 @@ Follow these instructions: .. code:: avn service integration-update SERVICE_INTEGRATION_ID \ - --project PROJECT \ + --project PROJECT_NAME \ --user-config-json '{ "tables": [ { diff --git a/docs/products/clickhouse/howto/integrate-postgresql.rst b/docs/products/clickhouse/howto/integrate-postgresql.rst index b59d2a102a..aabfeee7e2 100644 --- a/docs/products/clickhouse/howto/integrate-postgresql.rst +++ b/docs/products/clickhouse/howto/integrate-postgresql.rst @@ -63,13 +63,13 @@ When connecting to a PostgreSQL service, ClickHouse needs to know the name of th .. code:: - avn service integration-list CLICKHOUSE_SERVICE_NAME | grep PG_SERVICE_NAME + avn service integration-list --project PROJECT_NAME CLICKHOUSE_SERVICE_NAME | grep PG_SERVICE_NAME -2. Update the configuration settings using the service integration id retrieved in the previous step and your integration settings. Replace ``SERVICE_INTEGRATION_ID``, ``PG_DATABASE`` and ``PG_SCHEMA`` with your values, you can add more than one combination of database/schema in the object ``databases``: +1. Update the configuration settings using the service integration id retrieved in the previous step and your integration settings. Replace ``SERVICE_INTEGRATION_ID``, ``PG_DATABASE`` and ``PG_SCHEMA`` with your values, you can add more than one combination of database/schema in the object ``databases``: .. 
code:: - avn service integration-update SERVICE_INTEGRATION_ID \ + avn service integration-update --project PROJECT_NAME SERVICE_INTEGRATION_ID \ --user-config-json '{ "databases":[{"database":"PG_DATABASE","schema":"PG_SCHEMA"}] }' diff --git a/docs/products/clickhouse/howto/integration-databases.rst b/docs/products/clickhouse/howto/integration-databases.rst index 2b1e834e2c..de726c68f1 100644 --- a/docs/products/clickhouse/howto/integration-databases.rst +++ b/docs/products/clickhouse/howto/integration-databases.rst @@ -3,7 +3,7 @@ Manage Aiven for ClickHouse® integration databases Aiven for ClickHouse supports :doc:`regular integrations ` and :doc:`data service integrations `. -You can create Aiven for ClickHouse® integrations databases in the `Aiven web console `_ either when :ref:`creating a new data service integration ` or from the the **Databases and tables** view of your service. +You can create Aiven for ClickHouse® integrations databases in the `Aiven web console `_ either when :ref:`creating a new data service integration ` or from the the **Databases and tables** view of your service. This article details how to set up and manage integration databases from the the **Databases and tables** view of your Aiven for ClickHouse service. @@ -22,6 +22,8 @@ Prerequisites * Aiven account * Access to `Aiven web console `_ +.. _create-integ-db: + Create integration databases ---------------------------- @@ -110,7 +112,7 @@ Delete integration databases Your integration database has been removed from the **Databases and tables** list. -Related reading +Related pages --------------- * :doc:`Manage Aiven for ClickHouse® data service integrations ` diff --git a/docs/products/clickhouse/howto/load-dataset.rst b/docs/products/clickhouse/howto/load-dataset.rst index 9377319e46..39b90004a5 100644 --- a/docs/products/clickhouse/howto/load-dataset.rst +++ b/docs/products/clickhouse/howto/load-dataset.rst @@ -13,9 +13,11 @@ The steps below show you how to download the dataset, set up a connection with t Download the dataset -------------------- -Download the original dataset directly from `the dataset documentation page `_. You can do this using cURL, where the generic command looks like this:: +Download the original dataset directly from `the dataset documentation page `_. You can do this using cURL, where the generic command looks like this: - curl address_to_file_in_format_tsv_xz | unxz --threads=`nproc` > file-name.tsv +.. code:: + + curl address_to_file_in_format_tsv_xz | unxz --threads=`nproc` > file-name.tsv .. note:: The ``nproc`` Linux command, which prints the number of processing units, is not available on macOS. To use the above command, add an alias for ``nproc`` into your ``~/.zshrc`` file: ``alias nproc="sysctl -n hw.logicalcpu"``. @@ -27,7 +29,7 @@ Once done, you should have two files available: ``hits_v1.tsv`` and ``visits_v1. Set up the service and database ------------------------------- -If you don't yet have an Aiven for ClickHouse service, follow the steps in our :doc:`getting started guide ` to create one. +If you don't yet have an Aiven for ClickHouse service, follow the steps in our :doc:`getting started guide ` to create one. When you create a service, a default database was already added. However, you can create separate databases specific to your use case. We will create a database with the name ``datasets``, keeping it the same as in the ClickHouse documentation. 
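+
+Once the ``datasets`` database exists, you can confirm that it is visible to your user before creating the tables. A minimal check from the ClickHouse client (the exact list of databases depends on your service):
+
+.. code-block:: sql
+
+    SHOW DATABASES
+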
@@ -79,24 +81,28 @@ Load data Now that you have a dataset with two empty tables, we'll load data into each of the tables. However, because we need to access files outside the docker container, we'll run the command specifying ``--query`` parameter. To do this: -1. Go to the folder where you stored the downloaded files for ``hits_v1.tsv`` and ``visits_v1.tsv``. - -#. Run the following command:: - - cat hits_v1.tsv | docker run \ - --interactive \ - --rm clickhouse/clickhouse-server clickhouse-client \ - --user USERNAME \ - --password PASSWORD \ - --host HOST \ - --port PORT \ - --secure \ - --max_insert_block_size=100000 \ - --query="INSERT INTO datasets.hits_v1 FORMAT TSV" +#. Go to the folder where you stored the downloaded files for ``hits_v1.tsv`` and ``visits_v1.tsv``. + +#. Run the following command: + + .. code:: + + cat hits_v1.tsv | docker run \ + --interactive \ + --rm clickhouse/clickhouse-server clickhouse-client \ + --user USERNAME \ + --password PASSWORD \ + --host HOST \ + --port PORT \ + --secure \ + --max_insert_block_size=100000 \ + --query="INSERT INTO datasets.hits_v1 FORMAT TSV" ``hits_v1.tsv`` contains approximately 7Gb of data. Depending on your internet connection, it can take some time to load all the items. -#. Run the corresponding command for ``visits_v1.tsv``:: +#. Run the corresponding command for ``visits_v1.tsv``: + + .. code:: cat visits_v1.tsv | docker run \ --interactive \ diff --git a/docs/products/clickhouse/howto/manage-users-roles.rst b/docs/products/clickhouse/howto/manage-users-roles.rst index 1ae7f5780c..aea192a1a2 100644 --- a/docs/products/clickhouse/howto/manage-users-roles.rst +++ b/docs/products/clickhouse/howto/manage-users-roles.rst @@ -8,7 +8,7 @@ Add a new user To create a new user account for your service, -1. Log in to the `Aiven web console `_ and, select your ClickHouse® service. +#. Log in to the `Aiven web console `_ and, select your ClickHouse® service. #. Select **Users and roles** from the sidebar of your service's page. @@ -41,7 +41,7 @@ This article shows you examples of how to create roles and grant privileges. The Create a new role ^^^^^^^^^^^^^^^^^ -To create a new role named `auditor`, run the following command: +To create a new role named **auditor**, run the following command: .. code:: @@ -54,37 +54,47 @@ Grant permissions You can grant permissions both to specific roles and to individual users. The grants can be also granular, targeting specific databases, tables, columns, or rows. -For example, the following request grants the ``auditor`` role permissions to select data from the ``transactions`` database:: +For example, the following request grants the ``auditor`` role permissions to select data from the ``transactions`` database: + +.. code:: GRANT SELECT ON transactions.* TO auditor; -You can limit the grant to a specified table:: +You can limit the grant to a specified table: + +.. code:: GRANT SELECT ON transactions.expenses TO auditor; -Or to particular columns of a table:: +Or to particular columns of a table: + +.. code:: GRANT SELECT(date,description,amount) ON transactions.expenses TO auditor -To grant the ``auditor`` and ``external`` roles to several users, run:: +To grant the ``auditor`` and ``external`` roles to several users, run: + +.. code:: GRANT auditor, external TO Mary.Anderson, James.Miller; -To allow the creation of new users:: +To allow the creation of new users: + +.. 
code:: GRANT CREATE USER ON transactions.* TO administrator There are a variety of privileges that you can grant, and you can find `the full list in the ClickHouse documentation `_. -.. note :: +.. note:: You can grant permissions to a table that does not yet exist. -.. note :: +.. note:: Users can grant permissions according to their privileges. If the user lacks the required permissions for a requested operation, they receive a `Not enough privileges` exception. -.. warning :: +.. warning:: Privileges are not revoked when a table or database is removed. They continue to be active for any new table or database that is created with the same name. @@ -108,27 +118,35 @@ You can also specify a role to be activated by default when the user logs in: Delete a role ^^^^^^^^^^^^^ -If you no longer need a role, you can remove it:: +If you no longer need a role, you can remove it: + +.. code:: DROP ROLE auditor; Revoke permissions ^^^^^^^^^^^^^^^^^^ -Remove all or specific privileges from users or roles:: +Remove all or specific privileges from users or roles: - REVOKE SELECT ON transactions.expenses FROM Mary.Anderson; +.. code:: -Revoke all privileges to a table or database simultaneously:: + REVOKE SELECT ON transactions.expenses FROM Mary.Anderson; - REVOKE ALL PRIVILEGES ON database.table FROM external; +Revoke all privileges to a table or database simultaneously: + +.. code:: + + REVOKE ALL PRIVILEGES ON database.table FROM external; See the ClickHouse documentation `for more information on revoking privileges `_. Check permissions ^^^^^^^^^^^^^^^^^ -Run the following commands to see all available grants, users, and roles:: +Run the following commands to see all available grants, users, and roles: + +.. code:: SHOW GRANTS; diff --git a/docs/products/clickhouse/howto/query-databases.rst b/docs/products/clickhouse/howto/query-databases.rst index 87708ea495..8323785132 100644 --- a/docs/products/clickhouse/howto/query-databases.rst +++ b/docs/products/clickhouse/howto/query-databases.rst @@ -26,17 +26,23 @@ The requests that you run through the query editor rely on the permissions grant Examples of queries ^^^^^^^^^^^^^^^^^^^ -Retrieve a list of current databases:: +Retrieve a list of current databases: - SHOW DATABASES +.. code:: -Count rows:: + SHOW DATABASES - SELECT COUNT(*) FROM transactions.accounts +Count rows: -Create a new role:: +.. code:: - CREATE ROLE accountant + SELECT COUNT(*) FROM transactions.accounts + +Create a new role: + +.. code:: + + CREATE ROLE accountant .. 
_play-iu: diff --git a/docs/products/clickhouse/howto/run-federated-queries.rst b/docs/products/clickhouse/howto/run-federated-queries.rst index 3c53b060ea..5a7984aceb 100644 --- a/docs/products/clickhouse/howto/run-federated-queries.rst +++ b/docs/products/clickhouse/howto/run-federated-queries.rst @@ -194,7 +194,7 @@ Once the table is defined, SELECT and INSERT statements execute GET and POST req INSERT INTO trips_export_endpoint_table VALUES (8765, 10, now() - INTERVAL 15 MINUTE, now(), 50, 20) -Related reading +Related pages --------------- * :doc:`About querying external data in Aiven for ClickHouse® ` diff --git a/docs/products/clickhouse/howto/secure-service.rst b/docs/products/clickhouse/howto/secure-service.rst index 63232b005e..b1708b289c 100644 --- a/docs/products/clickhouse/howto/secure-service.rst +++ b/docs/products/clickhouse/howto/secure-service.rst @@ -25,9 +25,9 @@ Aiven services can be protected against accidental deletion or powering off by e Enable the termination protection ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -1. Log in to the `Aiven web console `_, and select your ClickHouse® service from the **Services** view. - -2. In the **Overview** page of your service, scroll down to the **Termination protection** section and enable this feature using the toggle switch. +1. Log in to `Aiven Console `_, and select your ClickHouse® service from the **Services** view. +2. On the **Overview** page of your service, select **Service settings** from the sidebar. +3. On the **Service settings** page, navigate to the **Service status** section, and select **Enable termination protection** from the **Actions** (**...**) menu. .. topic:: Result diff --git a/docs/products/clickhouse/howto/transfer-data-tiered-storage.rst b/docs/products/clickhouse/howto/transfer-data-tiered-storage.rst index 113cbb6adf..3590d72819 100644 --- a/docs/products/clickhouse/howto/transfer-data-tiered-storage.rst +++ b/docs/products/clickhouse/howto/transfer-data-tiered-storage.rst @@ -65,7 +65,7 @@ What's next * :doc:`Check data distribution between SSD and object storage ` * :doc:`Configure data retention thresholds for tiered storage ` -Related reading +Related pages --------------- * :doc:`About tiered storage in Aiven for ClickHouse ` diff --git a/docs/products/clickhouse/howto/use-shards-with-distributed-table.rst b/docs/products/clickhouse/howto/use-shards-with-distributed-table.rst index 099c992d90..5e76f30375 100644 --- a/docs/products/clickhouse/howto/use-shards-with-distributed-table.rst +++ b/docs/products/clickhouse/howto/use-shards-with-distributed-table.rst @@ -52,21 +52,21 @@ Check if the distributed table you created is available and if you can use it to 1. Run a read query for the number of table rows: -.. code-block:: sql + .. code-block:: sql - SELECT count() FROM test_db.cash_flows_distributed + SELECT count() FROM test_db.cash_flows_distributed As a response to this query, you can expect to receive a number of rows from all the shards. This is because when you connect on one node and read from the distributed table, ClickHouse® aggregates the data from all the shards and returns all of it. 2. Run a write query to insert new data into the distributed table: -.. code-block:: sql + .. 
code-block:: sql - INSERT INTO test_db.cash_flows_distributed ( + INSERT INTO test_db.cash_flows_distributed ( EventDate, SourceAccount, TargetAccount, Amount - ) - VALUES ( + ) + VALUES ( '2022-01-02 03:04:05', 123, 456, 100.0 - ) + ) When you insert data into the distributed table, ClickHouse® decides on which node the data should be stored and write it to the correct node making sure that a similar volume of data is written on all the nodes. diff --git a/docs/products/clickhouse/reference.rst b/docs/products/clickhouse/reference.rst index 724adc8807..9d0d5ea330 100644 --- a/docs/products/clickhouse/reference.rst +++ b/docs/products/clickhouse/reference.rst @@ -28,3 +28,15 @@ Additional reference information for Aiven for ClickHouse®: .. grid-item-card:: :doc:`Table functions supported in Aiven for ClickHouse® ` :shadow: md :margin: 2 2 0 0 + + .. grid-item-card:: :doc:`Interfaces and drivers supported in Aiven for ClickHouse® ` + :shadow: md + :margin: 2 2 0 0 + + .. grid-item-card:: :doc:`Aiven for ClickHouse® metrics available via Datadog ` + :shadow: md + :margin: 2 2 0 0 + + .. grid-item-card:: :doc:`Aiven for ClickHouse® metrics available via Prometheus ` + :shadow: md + :margin: 2 2 0 0 diff --git a/docs/products/clickhouse/reference/clickhouse-metrics-datadog.rst b/docs/products/clickhouse/reference/clickhouse-metrics-datadog.rst new file mode 100644 index 0000000000..98dba8fd77 --- /dev/null +++ b/docs/products/clickhouse/reference/clickhouse-metrics-datadog.rst @@ -0,0 +1,15 @@ +Aiven for ClickHouse® metrics available via Datadog +=================================================== + +Learn what metrics are available via Datadog for Aiven for ClickHouse® services. + +Get a metrics list for your service +----------------------------------- + +The list of Aiven for ClickHouse metrics available in Datadog corresponds to the list of metrics available for the open-source ClickHouse and can be checked in `Metrics `_. + +Related pages +--------------- + +* Check how to use Datadog with Aiven services in :doc:`Datadog and Aiven `. +* Check how to send metrics to Datadog from Aiven services in :doc:`Send metrics to Datadog `. diff --git a/docs/products/clickhouse/reference/clickhouse-metrics-prometheus.rst b/docs/products/clickhouse/reference/clickhouse-metrics-prometheus.rst new file mode 100644 index 0000000000..e190af055c --- /dev/null +++ b/docs/products/clickhouse/reference/clickhouse-metrics-prometheus.rst @@ -0,0 +1,1440 @@ +Aiven for ClickHouse® metrics available via Prometheus +====================================================== + +This article provides the list of all metrics available via Prometheus for Aiven for ClickHouse® services. + +You can retrieve the complete list of available metrics for your service by requesting the Prometheus endpoint as follows: + +.. code-block:: bash + + curl --cacert ca.pem \ + --user ':' \ + 'https://:/metrics' + +Where you substitute the following: + +* Aiven project certificate for ``ca.pem`` +* Prometheus credentials for ``:`` +* Aiven for ClickHouse hostname for ```` +* Prometheus port for ```` + +.. Tip:: + + You can check how to use Prometheus with Aiven in :doc:`Prometheus metrics `. + +.. 
code-block:: shell + + # TYPE clickhouse_asynchronous_metrics_number_of_databases untyped + clickhouse_asynchronous_metrics_number_of_databases + # TYPE clickhouse_asynchronous_metrics_number_of_tables untyped + clickhouse_asynchronous_metrics_number_of_tables + # TYPE clickhouse_asynchronous_metrics_total_parts_of_merge_tree_tables untyped + clickhouse_asynchronous_metrics_total_parts_of_merge_tree_tables + # TYPE clickhouse_asynchronous_metrics_total_rows_of_merge_tree_tables untyped + clickhouse_asynchronous_metrics_total_rows_of_merge_tree_tables + # TYPE clickhouse_events_inserted_bytes untyped + clickhouse_events_inserted_bytes + # TYPE clickhouse_events_inserted_rows untyped + clickhouse_events_inserted_rows + # TYPE clickhouse_events_merge untyped + clickhouse_events_merge + # TYPE clickhouse_events_merged_rows untyped + clickhouse_events_merged_rows + # TYPE clickhouse_events_merged_uncompressed_bytes untyped + clickhouse_events_merged_uncompressed_bytes + # TYPE clickhouse_events_query untyped + clickhouse_events_query + # TYPE clickhouse_events_read_compressed_bytes untyped + clickhouse_events_read_compressed_bytes + # TYPE clickhouse_events_select_query untyped + clickhouse_events_select_query + # TYPE clickhouse_metrics_delayed_inserts untyped + clickhouse_metrics_delayed_inserts + # TYPE clickhouse_metrics_ephemeral_node untyped + clickhouse_metrics_ephemeral_node + # TYPE clickhouse_metrics_http_connection untyped + clickhouse_metrics_http_connection + # TYPE clickhouse_metrics_interserver_connection untyped + clickhouse_metrics_interserver_connection + # TYPE clickhouse_metrics_merge untyped + clickhouse_metrics_merge + # TYPE clickhouse_metrics_query untyped + clickhouse_metrics_query + # TYPE clickhouse_metrics_query_preempted untyped + clickhouse_metrics_query_preempted + # TYPE clickhouse_metrics_readonly_replica untyped + clickhouse_metrics_readonly_replica + # TYPE clickhouse_metrics_replicated_checks untyped + clickhouse_metrics_replicated_checks + # TYPE clickhouse_metrics_replicated_fetch untyped + clickhouse_metrics_replicated_fetch + # TYPE clickhouse_metrics_rw_lock_active_readers untyped + clickhouse_metrics_rw_lock_active_readers + # TYPE clickhouse_metrics_rw_lock_active_writers untyped + clickhouse_metrics_rw_lock_active_writers + # TYPE clickhouse_metrics_rw_lock_waiting_readers untyped + clickhouse_metrics_rw_lock_waiting_readers + # TYPE clickhouse_metrics_rw_lock_waiting_writers untyped + clickhouse_metrics_rw_lock_waiting_writers + # TYPE clickhouse_metrics_tcp_connection untyped + clickhouse_metrics_tcp_connection + # TYPE clickhouse_metrics_zoo_keeper_request untyped + clickhouse_metrics_zoo_keeper_request + # TYPE clickhouse_metrics_zoo_keeper_session untyped + clickhouse_metrics_zoo_keeper_session + # TYPE clickhouse_metrics_zoo_keeper_watch untyped + clickhouse_metrics_zoo_keeper_watch + # TYPE clickhouse_replication_queue_num_attach_part untyped + clickhouse_replication_queue_num_attach_part + # TYPE clickhouse_replication_queue_num_get_part untyped + clickhouse_replication_queue_num_get_part + # TYPE clickhouse_replication_queue_num_merge_parts untyped + clickhouse_replication_queue_num_merge_parts + # TYPE clickhouse_replication_queue_num_merge_parts_ttl_delete untyped + clickhouse_replication_queue_num_merge_parts_ttl_delete + # TYPE clickhouse_replication_queue_num_merge_parts_ttl_recompress untyped + clickhouse_replication_queue_num_merge_parts_ttl_recompress + # TYPE clickhouse_replication_queue_num_mutate_part untyped + 
clickhouse_replication_queue_num_mutate_part + # TYPE clickhouse_replication_queue_num_total untyped + clickhouse_replication_queue_num_total + # TYPE clickhouse_replication_queue_num_tries_replicas untyped + clickhouse_replication_queue_num_tries_replicas + # TYPE clickhouse_replication_queue_too_many_tries_replicas untyped + clickhouse_replication_queue_too_many_tries_replicas + # TYPE cpu_usage_guest gauge + cpu_usage_guest + # TYPE cpu_usage_guest_nice gauge + cpu_usage_guest_nice + # TYPE cpu_usage_idle gauge + cpu_usage_idle + # TYPE cpu_usage_iowait gauge + cpu_usage_iowait + # TYPE cpu_usage_irq gauge + cpu_usage_irq + # TYPE cpu_usage_nice gauge + cpu_usage_nice + # TYPE cpu_usage_softirq gauge + cpu_usage_softirq + # TYPE cpu_usage_steal gauge + cpu_usage_steal + # TYPE cpu_usage_system gauge + cpu_usage_system + # TYPE cpu_usage_user gauge + cpu_usage_user + # TYPE disk_free gauge + disk_free + # TYPE disk_inodes_free gauge + disk_inodes_free + # TYPE disk_inodes_total gauge + disk_inodes_total + # TYPE disk_inodes_used gauge + disk_inodes_used + # TYPE disk_total gauge + disk_total + # TYPE disk_used gauge + disk_used + # TYPE disk_used_percent gauge + disk_used_percent + # TYPE diskio_io_time counter + diskio_io_time + # TYPE diskio_iops_in_progress counter + diskio_iops_in_progress + # TYPE diskio_merged_reads counter + diskio_merged_reads + # TYPE diskio_merged_writes counter + diskio_merged_writes + # TYPE diskio_read_bytes counter + diskio_read_bytes + # TYPE diskio_read_time counter + diskio_read_time + # TYPE diskio_reads counter + diskio_reads + # TYPE diskio_weighted_io_time counter + diskio_weighted_io_time + # TYPE diskio_write_bytes counter + diskio_write_bytes + # TYPE diskio_write_time counter + diskio_write_time + # TYPE diskio_writes counter + diskio_writes + # TYPE kernel_boot_time counter + kernel_boot_time + # TYPE kernel_context_switches counter + kernel_context_switches + # TYPE kernel_entropy_avail counter + kernel_entropy_avail + # TYPE kernel_interrupts counter + kernel_interrupts + # TYPE kernel_processes_forked counter + kernel_processes_forked + # TYPE mem_active gauge + mem_active + # TYPE mem_available gauge + mem_available + # TYPE mem_available_percent gauge + mem_available_percent + # TYPE mem_buffered gauge + mem_buffered + # TYPE mem_cached gauge + mem_cached + # TYPE mem_commit_limit gauge + mem_commit_limit + # TYPE mem_committed_as gauge + mem_committed_as + # TYPE mem_dirty gauge + mem_dirty + # TYPE mem_free gauge + mem_free + # TYPE mem_high_free gauge + mem_high_free + # TYPE mem_high_total gauge + mem_high_total + # TYPE mem_huge_page_size gauge + mem_huge_page_size + # TYPE mem_huge_pages_free gauge + mem_huge_pages_free + # TYPE mem_huge_pages_total gauge + mem_huge_pages_total + # TYPE mem_inactive gauge + mem_inactive + # TYPE mem_low_free gauge + mem_low_free + # TYPE mem_low_total gauge + mem_low_total + # TYPE mem_mapped gauge + mem_mapped + # TYPE mem_page_tables gauge + mem_page_tables + # TYPE mem_shared gauge + mem_shared + # TYPE mem_slab gauge + mem_slab + # TYPE mem_sreclaimable gauge + mem_sreclaimable + # TYPE mem_sunreclaim gauge + mem_sunreclaim + # TYPE mem_swap_cached gauge + mem_swap_cached + # TYPE mem_swap_free gauge + mem_swap_free + # TYPE mem_swap_total gauge + mem_swap_total + # TYPE mem_total gauge + mem_total + # TYPE mem_used gauge + mem_used + # TYPE mem_used_percent gauge + mem_used_percent + # TYPE mem_vmalloc_chunk gauge + mem_vmalloc_chunk + # TYPE mem_vmalloc_total gauge + mem_vmalloc_total + # TYPE 
mem_vmalloc_used gauge + mem_vmalloc_used + # TYPE mem_write_back gauge + mem_write_back + # TYPE mem_write_back_tmp gauge + mem_write_back_tmp + # TYPE net_bytes_recv counter + net_bytes_recv + # TYPE net_bytes_sent counter + net_bytes_sent + # TYPE net_drop_in counter + net_drop_in + # TYPE net_drop_out counter + net_drop_out + # TYPE net_err_in counter + net_err_in + # TYPE net_err_out counter + net_err_out + # TYPE net_icmp_inaddrmaskreps untyped + net_icmp_inaddrmaskreps + # TYPE net_icmp_inaddrmasks untyped + net_icmp_inaddrmasks + # TYPE net_icmp_incsumerrors untyped + net_icmp_incsumerrors + # TYPE net_icmp_indestunreachs untyped + net_icmp_indestunreachs + # TYPE net_icmp_inechoreps untyped + net_icmp_inechoreps + # TYPE net_icmp_inechos untyped + net_icmp_inechos + # TYPE net_icmp_inerrors untyped + net_icmp_inerrors + # TYPE net_icmp_inmsgs untyped + net_icmp_inmsgs + # TYPE net_icmp_inparmprobs untyped + net_icmp_inparmprobs + # TYPE net_icmp_inredirects untyped + net_icmp_inredirects + # TYPE net_icmp_insrcquenchs untyped + net_icmp_insrcquenchs + # TYPE net_icmp_intimeexcds untyped + net_icmp_intimeexcds + # TYPE net_icmp_intimestampreps untyped + net_icmp_intimestampreps + # TYPE net_icmp_intimestamps untyped + net_icmp_intimestamps + # TYPE net_icmp_outaddrmaskreps untyped + net_icmp_outaddrmaskreps + # TYPE net_icmp_outaddrmasks untyped + net_icmp_outaddrmasks + # TYPE net_icmp_outdestunreachs untyped + net_icmp_outdestunreachs + # TYPE net_icmp_outechoreps untyped + net_icmp_outechoreps + # TYPE net_icmp_outechos untyped + net_icmp_outechos + # TYPE net_icmp_outerrors untyped + net_icmp_outerrors + # TYPE net_icmp_outmsgs untyped + net_icmp_outmsgs + # TYPE net_icmp_outparmprobs untyped + net_icmp_outparmprobs + # TYPE net_icmp_outratelimitglobal untyped + net_icmp_outratelimitglobal + # TYPE net_icmp_outratelimithost untyped + net_icmp_outratelimithost + # TYPE net_icmp_outredirects untyped + net_icmp_outredirects + # TYPE net_icmp_outsrcquenchs untyped + net_icmp_outsrcquenchs + # TYPE net_icmp_outtimeexcds untyped + net_icmp_outtimeexcds + # TYPE net_icmp_outtimestampreps untyped + net_icmp_outtimestampreps + # TYPE net_icmp_outtimestamps untyped + net_icmp_outtimestamps + # TYPE net_icmpmsg_intype3 untyped + net_icmpmsg_intype3 + # TYPE net_icmpmsg_intype8 untyped + net_icmpmsg_intype8 + # TYPE net_icmpmsg_outtype0 untyped + net_icmpmsg_outtype0 + # TYPE net_icmpmsg_outtype3 untyped + net_icmpmsg_outtype3 + # TYPE net_ip_defaultttl untyped + net_ip_defaultttl + # TYPE net_ip_forwarding untyped + net_ip_forwarding + # TYPE net_ip_forwdatagrams untyped + net_ip_forwdatagrams + # TYPE net_ip_fragcreates untyped + net_ip_fragcreates + # TYPE net_ip_fragfails untyped + net_ip_fragfails + # TYPE net_ip_fragoks untyped + net_ip_fragoks + # TYPE net_ip_inaddrerrors untyped + net_ip_inaddrerrors + # TYPE net_ip_indelivers untyped + net_ip_indelivers + # TYPE net_ip_indiscards untyped + net_ip_indiscards + # TYPE net_ip_inhdrerrors untyped + net_ip_inhdrerrors + # TYPE net_ip_inreceives untyped + net_ip_inreceives + # TYPE net_ip_inunknownprotos untyped + net_ip_inunknownprotos + # TYPE net_ip_outdiscards untyped + net_ip_outdiscards + # TYPE net_ip_outnoroutes untyped + net_ip_outnoroutes + # TYPE net_ip_outrequests untyped + net_ip_outrequests + # TYPE net_ip_reasmfails untyped + net_ip_reasmfails + # TYPE net_ip_reasmoks untyped + net_ip_reasmoks + # TYPE net_ip_reasmreqds untyped + net_ip_reasmreqds + # TYPE net_ip_reasmtimeout untyped + net_ip_reasmtimeout + # TYPE 
net_packets_recv counter + net_packets_recv + # TYPE net_packets_sent counter + net_packets_sent + # TYPE net_tcp_activeopens untyped + net_tcp_activeopens + # TYPE net_tcp_attemptfails untyped + net_tcp_attemptfails + # TYPE net_tcp_currestab untyped + net_tcp_currestab + # TYPE net_tcp_estabresets untyped + net_tcp_estabresets + # TYPE net_tcp_incsumerrors untyped + net_tcp_incsumerrors + # TYPE net_tcp_inerrs untyped + net_tcp_inerrs + # TYPE net_tcp_insegs untyped + net_tcp_insegs + # TYPE net_tcp_maxconn untyped + net_tcp_maxconn + # TYPE net_tcp_outrsts untyped + net_tcp_outrsts + # TYPE net_tcp_outsegs untyped + net_tcp_outsegs + # TYPE net_tcp_passiveopens untyped + net_tcp_passiveopens + # TYPE net_tcp_retranssegs untyped + net_tcp_retranssegs + # TYPE net_tcp_rtoalgorithm untyped + net_tcp_rtoalgorithm + # TYPE net_tcp_rtomax untyped + net_tcp_rtomax + # TYPE net_tcp_rtomin untyped + net_tcp_rtomin + # TYPE net_udp_ignoredmulti untyped + net_udp_ignoredmulti + # TYPE net_udp_incsumerrors untyped + net_udp_incsumerrors + # TYPE net_udp_indatagrams untyped + net_udp_indatagrams + # TYPE net_udp_inerrors untyped + net_udp_inerrors + # TYPE net_udp_memerrors untyped + net_udp_memerrors + # TYPE net_udp_noports untyped + net_udp_noports + # TYPE net_udp_outdatagrams untyped + net_udp_outdatagrams + # TYPE net_udp_rcvbuferrors untyped + net_udp_rcvbuferrors + # TYPE net_udp_sndbuferrors untyped + net_udp_sndbuferrors + # TYPE net_udplite_ignoredmulti untyped + net_udplite_ignoredmulti + # TYPE net_udplite_incsumerrors untyped + net_udplite_incsumerrors + # TYPE net_udplite_indatagrams untyped + net_udplite_indatagrams + # TYPE net_udplite_inerrors untyped + net_udplite_inerrors + # TYPE net_udplite_memerrors untyped + net_udplite_memerrors + # TYPE net_udplite_noports untyped + net_udplite_noports + # TYPE net_udplite_outdatagrams untyped + net_udplite_outdatagrams + # TYPE net_udplite_rcvbuferrors untyped + net_udplite_rcvbuferrors + # TYPE net_udplite_sndbuferrors untyped + net_udplite_sndbuferrors + # TYPE netstat_tcp_close untyped + netstat_tcp_close + # TYPE netstat_tcp_close_wait untyped + netstat_tcp_close_wait + # TYPE netstat_tcp_closing untyped + netstat_tcp_closing + # TYPE netstat_tcp_established untyped + netstat_tcp_established + # TYPE netstat_tcp_fin_wait1 untyped + netstat_tcp_fin_wait1 + # TYPE netstat_tcp_fin_wait2 untyped + netstat_tcp_fin_wait2 + # TYPE netstat_tcp_last_ack untyped + netstat_tcp_last_ack + # TYPE netstat_tcp_listen untyped + netstat_tcp_listen + # TYPE netstat_tcp_none untyped + netstat_tcp_none + # TYPE netstat_tcp_syn_recv untyped + netstat_tcp_syn_recv + # TYPE netstat_tcp_syn_sent untyped + netstat_tcp_syn_sent + # TYPE netstat_tcp_time_wait untyped + netstat_tcp_time_wait + # TYPE netstat_udp_socket untyped + netstat_udp_socket + # TYPE processes_blocked gauge + processes_blocked + # TYPE processes_dead gauge + processes_dead + # TYPE processes_idle gauge + processes_idle + # TYPE processes_paging gauge + processes_paging + # TYPE processes_running gauge + processes_running + # TYPE processes_sleeping gauge + processes_sleeping + # TYPE processes_stopped gauge + processes_stopped + # TYPE processes_total gauge + processes_total + # TYPE processes_total_threads gauge + processes_total_threads + # TYPE processes_unknown gauge + processes_unknown + # TYPE processes_zombies gauge + processes_zombies + # TYPE service_connections_accepted untyped + service_connections_accepted + # TYPE service_connections_dropped untyped + 
service_connections_dropped + # TYPE service_connections_limit_avg_per_second untyped + service_connections_limit_avg_per_second + # TYPE service_connections_limit_burst untyped + service_connections_limit_burst + # TYPE swap_free gauge + swap_free + # TYPE swap_in counter + swap_in + # TYPE swap_out counter + swap_out + # TYPE swap_total gauge + swap_total + # TYPE swap_used gauge + swap_used + # TYPE swap_used_percent gauge + swap_used_percent + # TYPE system_load1 gauge + system_load1 + # TYPE system_load15 gauge + system_load15 + # TYPE system_load5 gauge + system_load5 + # TYPE system_n_cpus gauge + system_n_cpus + # TYPE system_n_unique_users gauge + system_n_unique_users + # TYPE system_n_users gauge + system_n_users + # TYPE system_uptime counter + system_uptime + # TYPE zookeeper_add_dead_watcher_stall_time untyped + zookeeper_add_dead_watcher_stall_time + # TYPE zookeeper_approximate_data_size untyped + zookeeper_approximate_data_size + # TYPE zookeeper_auth_failed_count untyped + zookeeper_auth_failed_count + # TYPE zookeeper_bytes_received_count untyped + zookeeper_bytes_received_count + # TYPE zookeeper_cnt_1_ack_latency untyped + zookeeper_cnt_1_ack_latency + # TYPE zookeeper_cnt_action_create_service_user_done_write_per_namespace untyped + zookeeper_cnt_action_create_service_user_done_write_per_namespace + # TYPE zookeeper_cnt_action_grant_federated_queries_access_done_write_per_namespace untyped + zookeeper_cnt_action_grant_federated_queries_access_done_write_per_namespace + # TYPE zookeeper_cnt_action_grant_federated_queries_access_v2_done_write_per_namespace untyped + zookeeper_cnt_action_grant_federated_queries_access_v2_done_write_per_namespace + # TYPE zookeeper_cnt_action_restore_from_astacus_done_write_per_namespace untyped + zookeeper_cnt_action_restore_from_astacus_done_write_per_namespace + # TYPE zookeeper_cnt_action_update_service_users_privileges_done_write_per_namespace untyped + zookeeper_cnt_action_update_service_users_privileges_done_write_per_namespace + # TYPE zookeeper_cnt_action_update_service_users_privileges_v2_done_write_per_namespace untyped + zookeeper_cnt_action_update_service_users_privileges_v2_done_write_per_namespace + # TYPE zookeeper_cnt_clickhouse_read_per_namespace untyped + zookeeper_cnt_clickhouse_read_per_namespace + # TYPE zookeeper_cnt_clickhouse_write_per_namespace untyped + zookeeper_cnt_clickhouse_write_per_namespace + # TYPE zookeeper_cnt_close_session_prep_time untyped + zookeeper_cnt_close_session_prep_time + # TYPE zookeeper_cnt_commit_commit_proc_req_queued untyped + zookeeper_cnt_commit_commit_proc_req_queued + # TYPE zookeeper_cnt_commit_process_time untyped + zookeeper_cnt_commit_process_time + # TYPE zookeeper_cnt_commit_propagation_latency untyped + zookeeper_cnt_commit_propagation_latency + # TYPE zookeeper_cnt_concurrent_request_processing_in_commit_processor untyped + zookeeper_cnt_concurrent_request_processing_in_commit_processor + # TYPE zookeeper_cnt_connection_token_deficit untyped + zookeeper_cnt_connection_token_deficit + # TYPE zookeeper_cnt_dbinittime untyped + zookeeper_cnt_dbinittime + # TYPE zookeeper_cnt_dead_watchers_cleaner_latency untyped + zookeeper_cnt_dead_watchers_cleaner_latency + # TYPE zookeeper_cnt_election_leader_read_per_namespace untyped + zookeeper_cnt_election_leader_read_per_namespace + # TYPE zookeeper_cnt_election_leader_write_per_namespace untyped + zookeeper_cnt_election_leader_write_per_namespace + # TYPE zookeeper_cnt_election_time untyped + zookeeper_cnt_election_time + # TYPE 
zookeeper_cnt_follower_sync_time untyped + zookeeper_cnt_follower_sync_time + # TYPE zookeeper_cnt_fsynctime untyped + zookeeper_cnt_fsynctime + # TYPE zookeeper_cnt_health_write_per_namespace untyped + zookeeper_cnt_health_write_per_namespace + # TYPE zookeeper_cnt_inflight_diff_count untyped + zookeeper_cnt_inflight_diff_count + # TYPE zookeeper_cnt_inflight_snap_count untyped + zookeeper_cnt_inflight_snap_count + # TYPE zookeeper_cnt_jvm_pause_time_ms untyped + zookeeper_cnt_jvm_pause_time_ms + # TYPE zookeeper_cnt_local_write_committed_time_ms untyped + zookeeper_cnt_local_write_committed_time_ms + # TYPE zookeeper_cnt_netty_queued_buffer_capacity untyped + zookeeper_cnt_netty_queued_buffer_capacity + # TYPE zookeeper_cnt_node_changed_watch_count untyped + zookeeper_cnt_node_changed_watch_count + # TYPE zookeeper_cnt_node_children_watch_count untyped + zookeeper_cnt_node_children_watch_count + # TYPE zookeeper_cnt_node_created_watch_count untyped + zookeeper_cnt_node_created_watch_count + # TYPE zookeeper_cnt_node_deleted_watch_count untyped + zookeeper_cnt_node_deleted_watch_count + # TYPE zookeeper_cnt_node_slots_read_per_namespace untyped + zookeeper_cnt_node_slots_read_per_namespace + # TYPE zookeeper_cnt_node_slots_write_per_namespace untyped + zookeeper_cnt_node_slots_write_per_namespace + # TYPE zookeeper_cnt_nodes_read_per_namespace untyped + zookeeper_cnt_nodes_read_per_namespace + # TYPE zookeeper_cnt_nodes_write_per_namespace untyped + zookeeper_cnt_nodes_write_per_namespace + # TYPE zookeeper_cnt_om_commit_process_time_ms untyped + zookeeper_cnt_om_commit_process_time_ms + # TYPE zookeeper_cnt_om_proposal_process_time_ms untyped + zookeeper_cnt_om_proposal_process_time_ms + # TYPE zookeeper_cnt_pending_session_queue_size untyped + zookeeper_cnt_pending_session_queue_size + # TYPE zookeeper_cnt_prep_process_time untyped + zookeeper_cnt_prep_process_time + # TYPE zookeeper_cnt_prep_processor_queue_size untyped + zookeeper_cnt_prep_processor_queue_size + # TYPE zookeeper_cnt_prep_processor_queue_time_ms untyped + zookeeper_cnt_prep_processor_queue_time_ms + # TYPE zookeeper_cnt_propagation_latency untyped + zookeeper_cnt_propagation_latency + # TYPE zookeeper_cnt_proposal_ack_creation_latency untyped + zookeeper_cnt_proposal_ack_creation_latency + # TYPE zookeeper_cnt_proposal_latency untyped + zookeeper_cnt_proposal_latency + # TYPE zookeeper_cnt_quorum_ack_latency untyped + zookeeper_cnt_quorum_ack_latency + # TYPE zookeeper_cnt_read_commit_proc_issued untyped + zookeeper_cnt_read_commit_proc_issued + # TYPE zookeeper_cnt_read_commit_proc_req_queued untyped + zookeeper_cnt_read_commit_proc_req_queued + # TYPE zookeeper_cnt_read_commitproc_time_ms untyped + zookeeper_cnt_read_commitproc_time_ms + # TYPE zookeeper_cnt_read_final_proc_time_ms untyped + zookeeper_cnt_read_final_proc_time_ms + # TYPE zookeeper_cnt_readlatency untyped + zookeeper_cnt_readlatency + # TYPE zookeeper_cnt_reads_after_write_in_session_queue untyped + zookeeper_cnt_reads_after_write_in_session_queue + # TYPE zookeeper_cnt_reads_issued_from_session_queue untyped + zookeeper_cnt_reads_issued_from_session_queue + # TYPE zookeeper_cnt_requests_in_session_queue untyped + zookeeper_cnt_requests_in_session_queue + # TYPE zookeeper_cnt_server_write_committed_time_ms untyped + zookeeper_cnt_server_write_committed_time_ms + # TYPE zookeeper_cnt_session_queues_drained untyped + zookeeper_cnt_session_queues_drained + # TYPE zookeeper_cnt_snapshottime untyped + zookeeper_cnt_snapshottime + # TYPE 
zookeeper_cnt_startup_snap_load_time untyped + zookeeper_cnt_startup_snap_load_time + # TYPE zookeeper_cnt_startup_txns_load_time untyped + zookeeper_cnt_startup_txns_load_time + # TYPE zookeeper_cnt_startup_txns_loaded untyped + zookeeper_cnt_startup_txns_loaded + # TYPE zookeeper_cnt_sync_process_time untyped + zookeeper_cnt_sync_process_time + # TYPE zookeeper_cnt_sync_processor_batch_size untyped + zookeeper_cnt_sync_processor_batch_size + # TYPE zookeeper_cnt_sync_processor_queue_and_flush_time_ms untyped + zookeeper_cnt_sync_processor_queue_and_flush_time_ms + # TYPE zookeeper_cnt_sync_processor_queue_flush_time_ms untyped + zookeeper_cnt_sync_processor_queue_flush_time_ms + # TYPE zookeeper_cnt_sync_processor_queue_size untyped + zookeeper_cnt_sync_processor_queue_size + # TYPE zookeeper_cnt_sync_processor_queue_time_ms untyped + zookeeper_cnt_sync_processor_queue_time_ms + # TYPE zookeeper_cnt_time_waiting_empty_pool_in_commit_processor_read_ms untyped + zookeeper_cnt_time_waiting_empty_pool_in_commit_processor_read_ms + # TYPE zookeeper_cnt_updatelatency untyped + zookeeper_cnt_updatelatency + # TYPE zookeeper_cnt_write_batch_time_in_commit_processor untyped + zookeeper_cnt_write_batch_time_in_commit_processor + # TYPE zookeeper_cnt_write_commit_proc_issued untyped + zookeeper_cnt_write_commit_proc_issued + # TYPE zookeeper_cnt_write_commit_proc_req_queued untyped + zookeeper_cnt_write_commit_proc_req_queued + # TYPE zookeeper_cnt_write_commitproc_time_ms untyped + zookeeper_cnt_write_commitproc_time_ms + # TYPE zookeeper_cnt_write_final_proc_time_ms untyped + zookeeper_cnt_write_final_proc_time_ms + # TYPE zookeeper_cnt_zk_cluster_management_write_per_namespace untyped + zookeeper_cnt_zk_cluster_management_write_per_namespace + # TYPE zookeeper_cnt_zookeeper_read_per_namespace untyped + zookeeper_cnt_zookeeper_read_per_namespace + # TYPE zookeeper_cnt_zookeeper_write_per_namespace untyped + zookeeper_cnt_zookeeper_write_per_namespace + # TYPE zookeeper_commit_count untyped + zookeeper_commit_count + # TYPE zookeeper_connection_drop_count untyped + zookeeper_connection_drop_count + # TYPE zookeeper_connection_rejected untyped + zookeeper_connection_rejected + # TYPE zookeeper_connection_request_count untyped + zookeeper_connection_request_count + # TYPE zookeeper_connection_revalidate_count untyped + zookeeper_connection_revalidate_count + # TYPE zookeeper_dead_watchers_cleared untyped + zookeeper_dead_watchers_cleared + # TYPE zookeeper_dead_watchers_queued untyped + zookeeper_dead_watchers_queued + # TYPE zookeeper_diff_count untyped + zookeeper_diff_count + # TYPE zookeeper_digest_mismatches_count untyped + zookeeper_digest_mismatches_count + # TYPE zookeeper_ensemble_auth_fail untyped + zookeeper_ensemble_auth_fail + # TYPE zookeeper_ensemble_auth_skip untyped + zookeeper_ensemble_auth_skip + # TYPE zookeeper_ensemble_auth_success untyped + zookeeper_ensemble_auth_success + # TYPE zookeeper_ephemerals_count untyped + zookeeper_ephemerals_count + # TYPE zookeeper_global_sessions untyped + zookeeper_global_sessions + # TYPE zookeeper_large_requests_rejected untyped + zookeeper_large_requests_rejected + # TYPE zookeeper_last_client_response_size untyped + zookeeper_last_client_response_size + # TYPE zookeeper_last_proposal_size untyped + zookeeper_last_proposal_size + # TYPE zookeeper_leader_uptime untyped + zookeeper_leader_uptime + # TYPE zookeeper_learner_commit_received_count untyped + zookeeper_learner_commit_received_count + # TYPE zookeeper_learner_proposal_received_count 
untyped + zookeeper_learner_proposal_received_count + # TYPE zookeeper_learners untyped + zookeeper_learners + # TYPE zookeeper_local_sessions untyped + zookeeper_local_sessions + # TYPE zookeeper_looking_count untyped + zookeeper_looking_count + # TYPE zookeeper_max_1_ack_latency untyped + zookeeper_max_1_ack_latency + # TYPE zookeeper_max_action_create_service_user_done_write_per_namespace untyped + zookeeper_max_action_create_service_user_done_write_per_namespace + # TYPE zookeeper_max_action_grant_federated_queries_access_done_write_per_namespace untyped + zookeeper_max_action_grant_federated_queries_access_done_write_per_namespace + # TYPE zookeeper_max_action_grant_federated_queries_access_v2_done_write_per_namespace untyped + zookeeper_max_action_grant_federated_queries_access_v2_done_write_per_namespace + # TYPE zookeeper_max_action_restore_from_astacus_done_write_per_namespace untyped + zookeeper_max_action_restore_from_astacus_done_write_per_namespace + # TYPE zookeeper_max_action_update_service_users_privileges_done_write_per_namespace untyped + zookeeper_max_action_update_service_users_privileges_done_write_per_namespace + # TYPE zookeeper_max_action_update_service_users_privileges_v2_done_write_per_namespace untyped + zookeeper_max_action_update_service_users_privileges_v2_done_write_per_namespace + # TYPE zookeeper_max_clickhouse_read_per_namespace untyped + zookeeper_max_clickhouse_read_per_namespace + # TYPE zookeeper_max_clickhouse_write_per_namespace untyped + zookeeper_max_clickhouse_write_per_namespace + # TYPE zookeeper_max_client_response_size untyped + zookeeper_max_client_response_size + # TYPE zookeeper_max_close_session_prep_time untyped + zookeeper_max_close_session_prep_time + # TYPE zookeeper_max_commit_commit_proc_req_queued untyped + zookeeper_max_commit_commit_proc_req_queued + # TYPE zookeeper_max_commit_process_time untyped + zookeeper_max_commit_process_time + # TYPE zookeeper_max_commit_propagation_latency untyped + zookeeper_max_commit_propagation_latency + # TYPE zookeeper_max_concurrent_request_processing_in_commit_processor untyped + zookeeper_max_concurrent_request_processing_in_commit_processor + # TYPE zookeeper_max_connection_token_deficit untyped + zookeeper_max_connection_token_deficit + # TYPE zookeeper_max_dbinittime untyped + zookeeper_max_dbinittime + # TYPE zookeeper_max_dead_watchers_cleaner_latency untyped + zookeeper_max_dead_watchers_cleaner_latency + # TYPE zookeeper_max_election_leader_read_per_namespace untyped + zookeeper_max_election_leader_read_per_namespace + # TYPE zookeeper_max_election_leader_write_per_namespace untyped + zookeeper_max_election_leader_write_per_namespace + # TYPE zookeeper_max_election_time untyped + zookeeper_max_election_time + # TYPE zookeeper_max_file_descriptor_count untyped + zookeeper_max_file_descriptor_count + # TYPE zookeeper_max_follower_sync_time untyped + zookeeper_max_follower_sync_time + # TYPE zookeeper_max_fsynctime untyped + zookeeper_max_fsynctime + # TYPE zookeeper_max_health_write_per_namespace untyped + zookeeper_max_health_write_per_namespace + # TYPE zookeeper_max_inflight_diff_count untyped + zookeeper_max_inflight_diff_count + # TYPE zookeeper_max_inflight_snap_count untyped + zookeeper_max_inflight_snap_count + # TYPE zookeeper_max_jvm_pause_time_ms untyped + zookeeper_max_jvm_pause_time_ms + # TYPE zookeeper_max_latency untyped + zookeeper_max_latency + # TYPE zookeeper_max_local_write_committed_time_ms untyped + zookeeper_max_local_write_committed_time_ms + # TYPE 
zookeeper_max_netty_queued_buffer_capacity untyped + zookeeper_max_netty_queued_buffer_capacity + # TYPE zookeeper_max_node_changed_watch_count untyped + zookeeper_max_node_changed_watch_count + # TYPE zookeeper_max_node_children_watch_count untyped + zookeeper_max_node_children_watch_count + # TYPE zookeeper_max_node_created_watch_count untyped + zookeeper_max_node_created_watch_count + # TYPE zookeeper_max_node_deleted_watch_count untyped + zookeeper_max_node_deleted_watch_count + # TYPE zookeeper_max_node_slots_read_per_namespace untyped + zookeeper_max_node_slots_read_per_namespace + # TYPE zookeeper_max_node_slots_write_per_namespace untyped + zookeeper_max_node_slots_write_per_namespace + # TYPE zookeeper_max_nodes_read_per_namespace untyped + zookeeper_max_nodes_read_per_namespace + # TYPE zookeeper_max_nodes_write_per_namespace untyped + zookeeper_max_nodes_write_per_namespace + # TYPE zookeeper_max_om_commit_process_time_ms untyped + zookeeper_max_om_commit_process_time_ms + # TYPE zookeeper_max_om_proposal_process_time_ms untyped + zookeeper_max_om_proposal_process_time_ms + # TYPE zookeeper_max_pending_session_queue_size untyped + zookeeper_max_pending_session_queue_size + # TYPE zookeeper_max_prep_process_time untyped + zookeeper_max_prep_process_time + # TYPE zookeeper_max_prep_processor_queue_size untyped + zookeeper_max_prep_processor_queue_size + # TYPE zookeeper_max_prep_processor_queue_time_ms untyped + zookeeper_max_prep_processor_queue_time_ms + # TYPE zookeeper_max_propagation_latency untyped + zookeeper_max_propagation_latency + # TYPE zookeeper_max_proposal_ack_creation_latency untyped + zookeeper_max_proposal_ack_creation_latency + # TYPE zookeeper_max_proposal_latency untyped + zookeeper_max_proposal_latency + # TYPE zookeeper_max_proposal_size untyped + zookeeper_max_proposal_size + # TYPE zookeeper_max_quorum_ack_latency untyped + zookeeper_max_quorum_ack_latency + # TYPE zookeeper_max_read_commit_proc_issued untyped + zookeeper_max_read_commit_proc_issued + # TYPE zookeeper_max_read_commit_proc_req_queued untyped + zookeeper_max_read_commit_proc_req_queued + # TYPE zookeeper_max_read_commitproc_time_ms untyped + zookeeper_max_read_commitproc_time_ms + # TYPE zookeeper_max_read_final_proc_time_ms untyped + zookeeper_max_read_final_proc_time_ms + # TYPE zookeeper_max_readlatency untyped + zookeeper_max_readlatency + # TYPE zookeeper_max_reads_after_write_in_session_queue untyped + zookeeper_max_reads_after_write_in_session_queue + # TYPE zookeeper_max_reads_issued_from_session_queue untyped + zookeeper_max_reads_issued_from_session_queue + # TYPE zookeeper_max_requests_in_session_queue untyped + zookeeper_max_requests_in_session_queue + # TYPE zookeeper_max_server_write_committed_time_ms untyped + zookeeper_max_server_write_committed_time_ms + # TYPE zookeeper_max_session_queues_drained untyped + zookeeper_max_session_queues_drained + # TYPE zookeeper_max_snapshottime untyped + zookeeper_max_snapshottime + # TYPE zookeeper_max_startup_snap_load_time untyped + zookeeper_max_startup_snap_load_time + # TYPE zookeeper_max_startup_txns_load_time untyped + zookeeper_max_startup_txns_load_time + # TYPE zookeeper_max_startup_txns_loaded untyped + zookeeper_max_startup_txns_loaded + # TYPE zookeeper_max_sync_process_time untyped + zookeeper_max_sync_process_time + # TYPE zookeeper_max_sync_processor_batch_size untyped + zookeeper_max_sync_processor_batch_size + # TYPE zookeeper_max_sync_processor_queue_and_flush_time_ms untyped + 
zookeeper_max_sync_processor_queue_and_flush_time_ms + # TYPE zookeeper_max_sync_processor_queue_flush_time_ms untyped + zookeeper_max_sync_processor_queue_flush_time_ms + # TYPE zookeeper_max_sync_processor_queue_size untyped + zookeeper_max_sync_processor_queue_size + # TYPE zookeeper_max_sync_processor_queue_time_ms untyped + zookeeper_max_sync_processor_queue_time_ms + # TYPE zookeeper_max_time_waiting_empty_pool_in_commit_processor_read_ms untyped + zookeeper_max_time_waiting_empty_pool_in_commit_processor_read_ms + # TYPE zookeeper_max_updatelatency untyped + zookeeper_max_updatelatency + # TYPE zookeeper_max_write_batch_time_in_commit_processor untyped + zookeeper_max_write_batch_time_in_commit_processor + # TYPE zookeeper_max_write_commit_proc_issued untyped + zookeeper_max_write_commit_proc_issued + # TYPE zookeeper_max_write_commit_proc_req_queued untyped + zookeeper_max_write_commit_proc_req_queued + # TYPE zookeeper_max_write_commitproc_time_ms untyped + zookeeper_max_write_commitproc_time_ms + # TYPE zookeeper_max_write_final_proc_time_ms untyped + zookeeper_max_write_final_proc_time_ms + # TYPE zookeeper_max_zk_cluster_management_write_per_namespace untyped + zookeeper_max_zk_cluster_management_write_per_namespace + # TYPE zookeeper_max_zookeeper_read_per_namespace untyped + zookeeper_max_zookeeper_read_per_namespace + # TYPE zookeeper_max_zookeeper_write_per_namespace untyped + zookeeper_max_zookeeper_write_per_namespace + # TYPE zookeeper_min_1_ack_latency untyped + zookeeper_min_1_ack_latency + # TYPE zookeeper_min_action_create_service_user_done_write_per_namespace untyped + zookeeper_min_action_create_service_user_done_write_per_namespace + # TYPE zookeeper_min_action_grant_federated_queries_access_done_write_per_namespace untyped + zookeeper_min_action_grant_federated_queries_access_done_write_per_namespace + # TYPE zookeeper_min_action_grant_federated_queries_access_v2_done_write_per_namespace untyped + zookeeper_min_action_grant_federated_queries_access_v2_done_write_per_namespace + # TYPE zookeeper_min_action_restore_from_astacus_done_write_per_namespace untyped + zookeeper_min_action_restore_from_astacus_done_write_per_namespace + # TYPE zookeeper_min_action_update_service_users_privileges_done_write_per_namespace untyped + zookeeper_min_action_update_service_users_privileges_done_write_per_namespace + # TYPE zookeeper_min_action_update_service_users_privileges_v2_done_write_per_namespace untyped + zookeeper_min_action_update_service_users_privileges_v2_done_write_per_namespace + # TYPE zookeeper_min_clickhouse_read_per_namespace untyped + zookeeper_min_clickhouse_read_per_namespace + # TYPE zookeeper_min_clickhouse_write_per_namespace untyped + zookeeper_min_clickhouse_write_per_namespace + # TYPE zookeeper_min_client_response_size untyped + zookeeper_min_client_response_size + # TYPE zookeeper_min_close_session_prep_time untyped + zookeeper_min_close_session_prep_time + # TYPE zookeeper_min_commit_commit_proc_req_queued untyped + zookeeper_min_commit_commit_proc_req_queued + # TYPE zookeeper_min_commit_process_time untyped + zookeeper_min_commit_process_time + # TYPE zookeeper_min_commit_propagation_latency untyped + zookeeper_min_commit_propagation_latency + # TYPE zookeeper_min_concurrent_request_processing_in_commit_processor untyped + zookeeper_min_concurrent_request_processing_in_commit_processor + # TYPE zookeeper_min_connection_token_deficit untyped + zookeeper_min_connection_token_deficit + # TYPE zookeeper_min_dbinittime untyped + zookeeper_min_dbinittime 
+ # TYPE zookeeper_min_dead_watchers_cleaner_latency untyped + zookeeper_min_dead_watchers_cleaner_latency + # TYPE zookeeper_min_election_leader_read_per_namespace untyped + zookeeper_min_election_leader_read_per_namespace + # TYPE zookeeper_min_election_leader_write_per_namespace untyped + zookeeper_min_election_leader_write_per_namespace + # TYPE zookeeper_min_election_time untyped + zookeeper_min_election_time + # TYPE zookeeper_min_follower_sync_time untyped + zookeeper_min_follower_sync_time + # TYPE zookeeper_min_fsynctime untyped + zookeeper_min_fsynctime + # TYPE zookeeper_min_health_write_per_namespace untyped + zookeeper_min_health_write_per_namespace + # TYPE zookeeper_min_inflight_diff_count untyped + zookeeper_min_inflight_diff_count + # TYPE zookeeper_min_inflight_snap_count untyped + zookeeper_min_inflight_snap_count + # TYPE zookeeper_min_jvm_pause_time_ms untyped + zookeeper_min_jvm_pause_time_ms + # TYPE zookeeper_min_latency untyped + zookeeper_min_latency + # TYPE zookeeper_min_local_write_committed_time_ms untyped + zookeeper_min_local_write_committed_time_ms + # TYPE zookeeper_min_netty_queued_buffer_capacity untyped + zookeeper_min_netty_queued_buffer_capacity + # TYPE zookeeper_min_node_changed_watch_count untyped + zookeeper_min_node_changed_watch_count + # TYPE zookeeper_min_node_children_watch_count untyped + zookeeper_min_node_children_watch_count + # TYPE zookeeper_min_node_created_watch_count untyped + zookeeper_min_node_created_watch_count + # TYPE zookeeper_min_node_deleted_watch_count untyped + zookeeper_min_node_deleted_watch_count + # TYPE zookeeper_min_node_slots_read_per_namespace untyped + zookeeper_min_node_slots_read_per_namespace + # TYPE zookeeper_min_node_slots_write_per_namespace untyped + zookeeper_min_node_slots_write_per_namespace + # TYPE zookeeper_min_nodes_read_per_namespace untyped + zookeeper_min_nodes_read_per_namespace + # TYPE zookeeper_min_nodes_write_per_namespace untyped + zookeeper_min_nodes_write_per_namespace + # TYPE zookeeper_min_om_commit_process_time_ms untyped + zookeeper_min_om_commit_process_time_ms + # TYPE zookeeper_min_om_proposal_process_time_ms untyped + zookeeper_min_om_proposal_process_time_ms + # TYPE zookeeper_min_pending_session_queue_size untyped + zookeeper_min_pending_session_queue_size + # TYPE zookeeper_min_prep_process_time untyped + zookeeper_min_prep_process_time + # TYPE zookeeper_min_prep_processor_queue_size untyped + zookeeper_min_prep_processor_queue_size + # TYPE zookeeper_min_prep_processor_queue_time_ms untyped + zookeeper_min_prep_processor_queue_time_ms + # TYPE zookeeper_min_propagation_latency untyped + zookeeper_min_propagation_latency + # TYPE zookeeper_min_proposal_ack_creation_latency untyped + zookeeper_min_proposal_ack_creation_latency + # TYPE zookeeper_min_proposal_latency untyped + zookeeper_min_proposal_latency + # TYPE zookeeper_min_proposal_size untyped + zookeeper_min_proposal_size + # TYPE zookeeper_min_quorum_ack_latency untyped + zookeeper_min_quorum_ack_latency + # TYPE zookeeper_min_read_commit_proc_issued untyped + zookeeper_min_read_commit_proc_issued + # TYPE zookeeper_min_read_commit_proc_req_queued untyped + zookeeper_min_read_commit_proc_req_queued + # TYPE zookeeper_min_read_commitproc_time_ms untyped + zookeeper_min_read_commitproc_time_ms + # TYPE zookeeper_min_read_final_proc_time_ms untyped + zookeeper_min_read_final_proc_time_ms + # TYPE zookeeper_min_readlatency untyped + zookeeper_min_readlatency + # TYPE zookeeper_min_reads_after_write_in_session_queue untyped 
+ zookeeper_min_reads_after_write_in_session_queue + # TYPE zookeeper_min_reads_issued_from_session_queue untyped + zookeeper_min_reads_issued_from_session_queue + # TYPE zookeeper_min_requests_in_session_queue untyped + zookeeper_min_requests_in_session_queue + # TYPE zookeeper_min_server_write_committed_time_ms untyped + zookeeper_min_server_write_committed_time_ms + # TYPE zookeeper_min_session_queues_drained untyped + zookeeper_min_session_queues_drained + # TYPE zookeeper_min_snapshottime untyped + zookeeper_min_snapshottime + # TYPE zookeeper_min_startup_snap_load_time untyped + zookeeper_min_startup_snap_load_time + # TYPE zookeeper_min_startup_txns_load_time untyped + zookeeper_min_startup_txns_load_time + # TYPE zookeeper_min_startup_txns_loaded untyped + zookeeper_min_startup_txns_loaded + # TYPE zookeeper_min_sync_process_time untyped + zookeeper_min_sync_process_time + # TYPE zookeeper_min_sync_processor_batch_size untyped + zookeeper_min_sync_processor_batch_size + # TYPE zookeeper_min_sync_processor_queue_and_flush_time_ms untyped + zookeeper_min_sync_processor_queue_and_flush_time_ms + # TYPE zookeeper_min_sync_processor_queue_flush_time_ms untyped + zookeeper_min_sync_processor_queue_flush_time_ms + # TYPE zookeeper_min_sync_processor_queue_size untyped + zookeeper_min_sync_processor_queue_size + # TYPE zookeeper_min_sync_processor_queue_time_ms untyped + zookeeper_min_sync_processor_queue_time_ms + # TYPE zookeeper_min_time_waiting_empty_pool_in_commit_processor_read_ms untyped + zookeeper_min_time_waiting_empty_pool_in_commit_processor_read_ms + # TYPE zookeeper_min_updatelatency untyped + zookeeper_min_updatelatency + # TYPE zookeeper_min_write_batch_time_in_commit_processor untyped + zookeeper_min_write_batch_time_in_commit_processor + # TYPE zookeeper_min_write_commit_proc_issued untyped + zookeeper_min_write_commit_proc_issued + # TYPE zookeeper_min_write_commit_proc_req_queued untyped + zookeeper_min_write_commit_proc_req_queued + # TYPE zookeeper_min_write_commitproc_time_ms untyped + zookeeper_min_write_commitproc_time_ms + # TYPE zookeeper_min_write_final_proc_time_ms untyped + zookeeper_min_write_final_proc_time_ms + # TYPE zookeeper_min_zk_cluster_management_write_per_namespace untyped + zookeeper_min_zk_cluster_management_write_per_namespace + # TYPE zookeeper_min_zookeeper_read_per_namespace untyped + zookeeper_min_zookeeper_read_per_namespace + # TYPE zookeeper_min_zookeeper_write_per_namespace untyped + zookeeper_min_zookeeper_write_per_namespace + # TYPE zookeeper_non_mtls_local_conn_count untyped + zookeeper_non_mtls_local_conn_count + # TYPE zookeeper_non_mtls_remote_conn_count untyped + zookeeper_non_mtls_remote_conn_count + # TYPE zookeeper_num_alive_connections untyped + zookeeper_num_alive_connections + # TYPE zookeeper_open_file_descriptor_count untyped + zookeeper_open_file_descriptor_count + # TYPE zookeeper_outstanding_changes_queued untyped + zookeeper_outstanding_changes_queued + # TYPE zookeeper_outstanding_changes_removed untyped + zookeeper_outstanding_changes_removed + # TYPE zookeeper_outstanding_requests untyped + zookeeper_outstanding_requests + # TYPE zookeeper_outstanding_tls_handshake untyped + zookeeper_outstanding_tls_handshake + # TYPE zookeeper_p50_1_ack_latency untyped + zookeeper_p50_1_ack_latency + # TYPE zookeeper_p50_close_session_prep_time untyped + zookeeper_p50_close_session_prep_time + # TYPE zookeeper_p50_commit_propagation_latency untyped + zookeeper_p50_commit_propagation_latency + # TYPE 
zookeeper_p50_dead_watchers_cleaner_latency untyped + zookeeper_p50_dead_watchers_cleaner_latency + # TYPE zookeeper_p50_jvm_pause_time_ms untyped + zookeeper_p50_jvm_pause_time_ms + # TYPE zookeeper_p50_local_write_committed_time_ms untyped + zookeeper_p50_local_write_committed_time_ms + # TYPE zookeeper_p50_om_commit_process_time_ms untyped + zookeeper_p50_om_commit_process_time_ms + # TYPE zookeeper_p50_om_proposal_process_time_ms untyped + zookeeper_p50_om_proposal_process_time_ms + # TYPE zookeeper_p50_prep_processor_queue_time_ms untyped + zookeeper_p50_prep_processor_queue_time_ms + # TYPE zookeeper_p50_propagation_latency untyped + zookeeper_p50_propagation_latency + # TYPE zookeeper_p50_proposal_ack_creation_latency untyped + zookeeper_p50_proposal_ack_creation_latency + # TYPE zookeeper_p50_proposal_latency untyped + zookeeper_p50_proposal_latency + # TYPE zookeeper_p50_quorum_ack_latency untyped + zookeeper_p50_quorum_ack_latency + # TYPE zookeeper_p50_read_commitproc_time_ms untyped + zookeeper_p50_read_commitproc_time_ms + # TYPE zookeeper_p50_read_final_proc_time_ms untyped + zookeeper_p50_read_final_proc_time_ms + # TYPE zookeeper_p50_readlatency untyped + zookeeper_p50_readlatency + # TYPE zookeeper_p50_server_write_committed_time_ms untyped + zookeeper_p50_server_write_committed_time_ms + # TYPE zookeeper_p50_sync_processor_queue_and_flush_time_ms untyped + zookeeper_p50_sync_processor_queue_and_flush_time_ms + # TYPE zookeeper_p50_sync_processor_queue_flush_time_ms untyped + zookeeper_p50_sync_processor_queue_flush_time_ms + # TYPE zookeeper_p50_sync_processor_queue_time_ms untyped + zookeeper_p50_sync_processor_queue_time_ms + # TYPE zookeeper_p50_updatelatency untyped + zookeeper_p50_updatelatency + # TYPE zookeeper_p50_write_commitproc_time_ms untyped + zookeeper_p50_write_commitproc_time_ms + # TYPE zookeeper_p50_write_final_proc_time_ms untyped + zookeeper_p50_write_final_proc_time_ms + # TYPE zookeeper_p95_1_ack_latency untyped + zookeeper_p95_1_ack_latency + # TYPE zookeeper_p95_close_session_prep_time untyped + zookeeper_p95_close_session_prep_time + # TYPE zookeeper_p95_commit_propagation_latency untyped + zookeeper_p95_commit_propagation_latency + # TYPE zookeeper_p95_dead_watchers_cleaner_latency untyped + zookeeper_p95_dead_watchers_cleaner_latency + # TYPE zookeeper_p95_jvm_pause_time_ms untyped + zookeeper_p95_jvm_pause_time_ms + # TYPE zookeeper_p95_local_write_committed_time_ms untyped + zookeeper_p95_local_write_committed_time_ms + # TYPE zookeeper_p95_om_commit_process_time_ms untyped + zookeeper_p95_om_commit_process_time_ms + # TYPE zookeeper_p95_om_proposal_process_time_ms untyped + zookeeper_p95_om_proposal_process_time_ms + # TYPE zookeeper_p95_prep_processor_queue_time_ms untyped + zookeeper_p95_prep_processor_queue_time_ms + # TYPE zookeeper_p95_propagation_latency untyped + zookeeper_p95_propagation_latency + # TYPE zookeeper_p95_proposal_ack_creation_latency untyped + zookeeper_p95_proposal_ack_creation_latency + # TYPE zookeeper_p95_proposal_latency untyped + zookeeper_p95_proposal_latency + # TYPE zookeeper_p95_quorum_ack_latency untyped + zookeeper_p95_quorum_ack_latency + # TYPE zookeeper_p95_read_commitproc_time_ms untyped + zookeeper_p95_read_commitproc_time_ms + # TYPE zookeeper_p95_read_final_proc_time_ms untyped + zookeeper_p95_read_final_proc_time_ms + # TYPE zookeeper_p95_readlatency untyped + zookeeper_p95_readlatency + # TYPE zookeeper_p95_server_write_committed_time_ms untyped + zookeeper_p95_server_write_committed_time_ms + # TYPE 
zookeeper_p95_sync_processor_queue_and_flush_time_ms untyped + zookeeper_p95_sync_processor_queue_and_flush_time_ms + # TYPE zookeeper_p95_sync_processor_queue_flush_time_ms untyped + zookeeper_p95_sync_processor_queue_flush_time_ms + # TYPE zookeeper_p95_sync_processor_queue_time_ms untyped + zookeeper_p95_sync_processor_queue_time_ms + # TYPE zookeeper_p95_updatelatency untyped + zookeeper_p95_updatelatency + # TYPE zookeeper_p95_write_commitproc_time_ms untyped + zookeeper_p95_write_commitproc_time_ms + # TYPE zookeeper_p95_write_final_proc_time_ms untyped + zookeeper_p95_write_final_proc_time_ms + # TYPE zookeeper_p999_1_ack_latency untyped + zookeeper_p999_1_ack_latency + # TYPE zookeeper_p999_close_session_prep_time untyped + zookeeper_p999_close_session_prep_time + # TYPE zookeeper_p999_commit_propagation_latency untyped + zookeeper_p999_commit_propagation_latency + # TYPE zookeeper_p999_dead_watchers_cleaner_latency untyped + zookeeper_p999_dead_watchers_cleaner_latency + # TYPE zookeeper_p999_jvm_pause_time_ms untyped + zookeeper_p999_jvm_pause_time_ms + # TYPE zookeeper_p999_local_write_committed_time_ms untyped + zookeeper_p999_local_write_committed_time_ms + # TYPE zookeeper_p999_om_commit_process_time_ms untyped + zookeeper_p999_om_commit_process_time_ms + # TYPE zookeeper_p999_om_proposal_process_time_ms untyped + zookeeper_p999_om_proposal_process_time_ms + # TYPE zookeeper_p999_prep_processor_queue_time_ms untyped + zookeeper_p999_prep_processor_queue_time_ms + # TYPE zookeeper_p999_propagation_latency untyped + zookeeper_p999_propagation_latency + # TYPE zookeeper_p999_proposal_ack_creation_latency untyped + zookeeper_p999_proposal_ack_creation_latency + # TYPE zookeeper_p999_proposal_latency untyped + zookeeper_p999_proposal_latency + # TYPE zookeeper_p999_quorum_ack_latency untyped + zookeeper_p999_quorum_ack_latency + # TYPE zookeeper_p999_read_commitproc_time_ms untyped + zookeeper_p999_read_commitproc_time_ms + # TYPE zookeeper_p999_read_final_proc_time_ms untyped + zookeeper_p999_read_final_proc_time_ms + # TYPE zookeeper_p999_readlatency untyped + zookeeper_p999_readlatency + # TYPE zookeeper_p999_server_write_committed_time_ms untyped + zookeeper_p999_server_write_committed_time_ms + # TYPE zookeeper_p999_sync_processor_queue_and_flush_time_ms untyped + zookeeper_p999_sync_processor_queue_and_flush_time_ms + # TYPE zookeeper_p999_sync_processor_queue_flush_time_ms untyped + zookeeper_p999_sync_processor_queue_flush_time_ms + # TYPE zookeeper_p999_sync_processor_queue_time_ms untyped + zookeeper_p999_sync_processor_queue_time_ms + # TYPE zookeeper_p999_updatelatency untyped + zookeeper_p999_updatelatency + # TYPE zookeeper_p999_write_commitproc_time_ms untyped + zookeeper_p999_write_commitproc_time_ms + # TYPE zookeeper_p999_write_final_proc_time_ms untyped + zookeeper_p999_write_final_proc_time_ms + # TYPE zookeeper_p99_1_ack_latency untyped + zookeeper_p99_1_ack_latency + # TYPE zookeeper_p99_close_session_prep_time untyped + zookeeper_p99_close_session_prep_time + # TYPE zookeeper_p99_commit_propagation_latency untyped + zookeeper_p99_commit_propagation_latency + # TYPE zookeeper_p99_dead_watchers_cleaner_latency untyped + zookeeper_p99_dead_watchers_cleaner_latency + # TYPE zookeeper_p99_jvm_pause_time_ms untyped + zookeeper_p99_jvm_pause_time_ms + # TYPE zookeeper_p99_local_write_committed_time_ms untyped + zookeeper_p99_local_write_committed_time_ms + # TYPE zookeeper_p99_om_commit_process_time_ms untyped + zookeeper_p99_om_commit_process_time_ms + # TYPE 
zookeeper_p99_om_proposal_process_time_ms untyped + zookeeper_p99_om_proposal_process_time_ms + # TYPE zookeeper_p99_prep_processor_queue_time_ms untyped + zookeeper_p99_prep_processor_queue_time_ms + # TYPE zookeeper_p99_propagation_latency untyped + zookeeper_p99_propagation_latency + # TYPE zookeeper_p99_proposal_ack_creation_latency untyped + zookeeper_p99_proposal_ack_creation_latency + # TYPE zookeeper_p99_proposal_latency untyped + zookeeper_p99_proposal_latency + # TYPE zookeeper_p99_quorum_ack_latency untyped + zookeeper_p99_quorum_ack_latency + # TYPE zookeeper_p99_read_commitproc_time_ms untyped + zookeeper_p99_read_commitproc_time_ms + # TYPE zookeeper_p99_read_final_proc_time_ms untyped + zookeeper_p99_read_final_proc_time_ms + # TYPE zookeeper_p99_readlatency untyped + zookeeper_p99_readlatency + # TYPE zookeeper_p99_server_write_committed_time_ms untyped + zookeeper_p99_server_write_committed_time_ms + # TYPE zookeeper_p99_sync_processor_queue_and_flush_time_ms untyped + zookeeper_p99_sync_processor_queue_and_flush_time_ms + # TYPE zookeeper_p99_sync_processor_queue_flush_time_ms untyped + zookeeper_p99_sync_processor_queue_flush_time_ms + # TYPE zookeeper_p99_sync_processor_queue_time_ms untyped + zookeeper_p99_sync_processor_queue_time_ms + # TYPE zookeeper_p99_updatelatency untyped + zookeeper_p99_updatelatency + # TYPE zookeeper_p99_write_commitproc_time_ms untyped + zookeeper_p99_write_commitproc_time_ms + # TYPE zookeeper_p99_write_final_proc_time_ms untyped + zookeeper_p99_write_final_proc_time_ms + # TYPE zookeeper_packets_received untyped + zookeeper_packets_received + # TYPE zookeeper_packets_sent untyped + zookeeper_packets_sent + # TYPE zookeeper_pending_syncs untyped + zookeeper_pending_syncs + # TYPE zookeeper_prep_processor_request_queued untyped + zookeeper_prep_processor_request_queued + # TYPE zookeeper_proposal_count untyped + zookeeper_proposal_count + # TYPE zookeeper_quit_leading_due_to_disloyal_voter untyped + zookeeper_quit_leading_due_to_disloyal_voter + # TYPE zookeeper_quorum_size untyped + zookeeper_quorum_size + # TYPE zookeeper_request_commit_queued untyped + zookeeper_request_commit_queued + # TYPE zookeeper_request_throttle_wait_count untyped + zookeeper_request_throttle_wait_count + # TYPE zookeeper_response_packet_cache_hits untyped + zookeeper_response_packet_cache_hits + # TYPE zookeeper_response_packet_cache_misses untyped + zookeeper_response_packet_cache_misses + # TYPE zookeeper_response_packet_get_children_cache_hits untyped + zookeeper_response_packet_get_children_cache_hits + # TYPE zookeeper_response_packet_get_children_cache_misses untyped + zookeeper_response_packet_get_children_cache_misses + # TYPE zookeeper_revalidate_count untyped + zookeeper_revalidate_count + # TYPE zookeeper_sessionless_connections_expired untyped + zookeeper_sessionless_connections_expired + # TYPE zookeeper_snap_count untyped + zookeeper_snap_count + # TYPE zookeeper_stale_replies untyped + zookeeper_stale_replies + # TYPE zookeeper_stale_requests untyped + zookeeper_stale_requests + # TYPE zookeeper_stale_requests_dropped untyped + zookeeper_stale_requests_dropped + # TYPE zookeeper_stale_sessions_expired untyped + zookeeper_stale_sessions_expired + # TYPE zookeeper_sum_1_ack_latency untyped + zookeeper_sum_1_ack_latency + # TYPE zookeeper_sum_action_create_service_user_done_write_per_namespace untyped + zookeeper_sum_action_create_service_user_done_write_per_namespace + # TYPE zookeeper_sum_action_grant_federated_queries_access_done_write_per_namespace 
untyped + zookeeper_sum_action_grant_federated_queries_access_done_write_per_namespace + # TYPE zookeeper_sum_action_grant_federated_queries_access_v2_done_write_per_namespace untyped + zookeeper_sum_action_grant_federated_queries_access_v2_done_write_per_namespace + # TYPE zookeeper_sum_action_restore_from_astacus_done_write_per_namespace untyped + zookeeper_sum_action_restore_from_astacus_done_write_per_namespace + # TYPE zookeeper_sum_action_update_service_users_privileges_done_write_per_namespace untyped + zookeeper_sum_action_update_service_users_privileges_done_write_per_namespace + # TYPE zookeeper_sum_action_update_service_users_privileges_v2_done_write_per_namespace untyped + zookeeper_sum_action_update_service_users_privileges_v2_done_write_per_namespace + # TYPE zookeeper_sum_clickhouse_read_per_namespace untyped + zookeeper_sum_clickhouse_read_per_namespace + # TYPE zookeeper_sum_clickhouse_write_per_namespace untyped + zookeeper_sum_clickhouse_write_per_namespace + # TYPE zookeeper_sum_close_session_prep_time untyped + zookeeper_sum_close_session_prep_time + # TYPE zookeeper_sum_commit_commit_proc_req_queued untyped + zookeeper_sum_commit_commit_proc_req_queued + # TYPE zookeeper_sum_commit_process_time untyped + zookeeper_sum_commit_process_time + # TYPE zookeeper_sum_commit_propagation_latency untyped + zookeeper_sum_commit_propagation_latency + # TYPE zookeeper_sum_concurrent_request_processing_in_commit_processor untyped + zookeeper_sum_concurrent_request_processing_in_commit_processor + # TYPE zookeeper_sum_connection_token_deficit untyped + zookeeper_sum_connection_token_deficit + # TYPE zookeeper_sum_dbinittime untyped + zookeeper_sum_dbinittime + # TYPE zookeeper_sum_dead_watchers_cleaner_latency untyped + zookeeper_sum_dead_watchers_cleaner_latency + # TYPE zookeeper_sum_election_leader_read_per_namespace untyped + zookeeper_sum_election_leader_read_per_namespace + # TYPE zookeeper_sum_election_leader_write_per_namespace untyped + zookeeper_sum_election_leader_write_per_namespace + # TYPE zookeeper_sum_election_time untyped + zookeeper_sum_election_time + # TYPE zookeeper_sum_follower_sync_time untyped + zookeeper_sum_follower_sync_time + # TYPE zookeeper_sum_fsynctime untyped + zookeeper_sum_fsynctime + # TYPE zookeeper_sum_health_write_per_namespace untyped + zookeeper_sum_health_write_per_namespace + # TYPE zookeeper_sum_inflight_diff_count untyped + zookeeper_sum_inflight_diff_count + # TYPE zookeeper_sum_inflight_snap_count untyped + zookeeper_sum_inflight_snap_count + # TYPE zookeeper_sum_jvm_pause_time_ms untyped + zookeeper_sum_jvm_pause_time_ms + # TYPE zookeeper_sum_local_write_committed_time_ms untyped + zookeeper_sum_local_write_committed_time_ms + # TYPE zookeeper_sum_netty_queued_buffer_capacity untyped + zookeeper_sum_netty_queued_buffer_capacity + # TYPE zookeeper_sum_node_changed_watch_count untyped + zookeeper_sum_node_changed_watch_count + # TYPE zookeeper_sum_node_children_watch_count untyped + zookeeper_sum_node_children_watch_count + # TYPE zookeeper_sum_node_created_watch_count untyped + zookeeper_sum_node_created_watch_count + # TYPE zookeeper_sum_node_deleted_watch_count untyped + zookeeper_sum_node_deleted_watch_count + # TYPE zookeeper_sum_node_slots_read_per_namespace untyped + zookeeper_sum_node_slots_read_per_namespace + # TYPE zookeeper_sum_node_slots_write_per_namespace untyped + zookeeper_sum_node_slots_write_per_namespace + # TYPE zookeeper_sum_nodes_read_per_namespace untyped + zookeeper_sum_nodes_read_per_namespace + # TYPE 
zookeeper_sum_nodes_write_per_namespace untyped + zookeeper_sum_nodes_write_per_namespace + # TYPE zookeeper_sum_om_commit_process_time_ms untyped + zookeeper_sum_om_commit_process_time_ms + # TYPE zookeeper_sum_om_proposal_process_time_ms untyped + zookeeper_sum_om_proposal_process_time_ms + # TYPE zookeeper_sum_pending_session_queue_size untyped + zookeeper_sum_pending_session_queue_size + # TYPE zookeeper_sum_prep_process_time untyped + zookeeper_sum_prep_process_time + # TYPE zookeeper_sum_prep_processor_queue_size untyped + zookeeper_sum_prep_processor_queue_size + # TYPE zookeeper_sum_prep_processor_queue_time_ms untyped + zookeeper_sum_prep_processor_queue_time_ms + # TYPE zookeeper_sum_propagation_latency untyped + zookeeper_sum_propagation_latency + # TYPE zookeeper_sum_proposal_ack_creation_latency untyped + zookeeper_sum_proposal_ack_creation_latency + # TYPE zookeeper_sum_proposal_latency untyped + zookeeper_sum_proposal_latency + # TYPE zookeeper_sum_quorum_ack_latency untyped + zookeeper_sum_quorum_ack_latency + # TYPE zookeeper_sum_read_commit_proc_issued untyped + zookeeper_sum_read_commit_proc_issued + # TYPE zookeeper_sum_read_commit_proc_req_queued untyped + zookeeper_sum_read_commit_proc_req_queued + # TYPE zookeeper_sum_read_commitproc_time_ms untyped + zookeeper_sum_read_commitproc_time_ms + # TYPE zookeeper_sum_read_final_proc_time_ms untyped + zookeeper_sum_read_final_proc_time_ms + # TYPE zookeeper_sum_readlatency untyped + zookeeper_sum_readlatency + # TYPE zookeeper_sum_reads_after_write_in_session_queue untyped + zookeeper_sum_reads_after_write_in_session_queue + # TYPE zookeeper_sum_reads_issued_from_session_queue untyped + zookeeper_sum_reads_issued_from_session_queue + # TYPE zookeeper_sum_requests_in_session_queue untyped + zookeeper_sum_requests_in_session_queue + # TYPE zookeeper_sum_server_write_committed_time_ms untyped + zookeeper_sum_server_write_committed_time_ms + # TYPE zookeeper_sum_session_queues_drained untyped + zookeeper_sum_session_queues_drained + # TYPE zookeeper_sum_snapshottime untyped + zookeeper_sum_snapshottime + # TYPE zookeeper_sum_startup_snap_load_time untyped + zookeeper_sum_startup_snap_load_time + # TYPE zookeeper_sum_startup_txns_load_time untyped + zookeeper_sum_startup_txns_load_time + # TYPE zookeeper_sum_startup_txns_loaded untyped + zookeeper_sum_startup_txns_loaded + # TYPE zookeeper_sum_sync_process_time untyped + zookeeper_sum_sync_process_time + # TYPE zookeeper_sum_sync_processor_batch_size untyped + zookeeper_sum_sync_processor_batch_size + # TYPE zookeeper_sum_sync_processor_queue_and_flush_time_ms untyped + zookeeper_sum_sync_processor_queue_and_flush_time_ms + # TYPE zookeeper_sum_sync_processor_queue_flush_time_ms untyped + zookeeper_sum_sync_processor_queue_flush_time_ms + # TYPE zookeeper_sum_sync_processor_queue_size untyped + zookeeper_sum_sync_processor_queue_size + # TYPE zookeeper_sum_sync_processor_queue_time_ms untyped + zookeeper_sum_sync_processor_queue_time_ms + # TYPE zookeeper_sum_time_waiting_empty_pool_in_commit_processor_read_ms untyped + zookeeper_sum_time_waiting_empty_pool_in_commit_processor_read_ms + # TYPE zookeeper_sum_updatelatency untyped + zookeeper_sum_updatelatency + # TYPE zookeeper_sum_write_batch_time_in_commit_processor untyped + zookeeper_sum_write_batch_time_in_commit_processor + # TYPE zookeeper_sum_write_commit_proc_issued untyped + zookeeper_sum_write_commit_proc_issued + # TYPE zookeeper_sum_write_commit_proc_req_queued untyped + zookeeper_sum_write_commit_proc_req_queued + # 
TYPE zookeeper_sum_write_commitproc_time_ms untyped + zookeeper_sum_write_commitproc_time_ms + # TYPE zookeeper_sum_write_final_proc_time_ms untyped + zookeeper_sum_write_final_proc_time_ms + # TYPE zookeeper_sum_zk_cluster_management_write_per_namespace untyped + zookeeper_sum_zk_cluster_management_write_per_namespace + # TYPE zookeeper_sum_zookeeper_read_per_namespace untyped + zookeeper_sum_zookeeper_read_per_namespace + # TYPE zookeeper_sum_zookeeper_write_per_namespace untyped + zookeeper_sum_zookeeper_write_per_namespace + # TYPE zookeeper_sync_processor_request_queued untyped + zookeeper_sync_processor_request_queued + # TYPE zookeeper_synced_followers untyped + zookeeper_synced_followers + # TYPE zookeeper_synced_non_voting_followers untyped + zookeeper_synced_non_voting_followers + # TYPE zookeeper_synced_observers untyped + zookeeper_synced_observers + # TYPE zookeeper_tls_handshake_exceeded untyped + zookeeper_tls_handshake_exceeded + # TYPE zookeeper_unrecoverable_error_count untyped + zookeeper_unrecoverable_error_count + # TYPE zookeeper_uptime untyped + zookeeper_uptime + # TYPE zookeeper_watch_count untyped + zookeeper_watch_count + # TYPE zookeeper_znode_count untyped + zookeeper_znode_count diff --git a/docs/products/clickhouse/reference/limitations.rst b/docs/products/clickhouse/reference/limitations.rst index a7622fc514..3b506ae344 100644 --- a/docs/products/clickhouse/reference/limitations.rst +++ b/docs/products/clickhouse/reference/limitations.rst @@ -44,14 +44,17 @@ From the information about restrictions on using Aiven for ClickHouse, you can e - Aiven for ClickHouse doesn't support Kafka Schema Registry, which allows to build stream processing pipelines with schemas. - \- * - Querying all shards at once - - If you have a sharded plan, you must use a Distributed view on top of your MergeTree table to query all the shards at the same time, and you should use it for inserts too. - - Use the ``Distributed`` view with sharded plans. + - If you have a sharded plan, you must use a distributed table on top of your MergeTree table to query all the shards at the same time, and you should use it for inserts too. + - Use a distributed table with sharded plans. Check :doc:`Query data across shards ` for the instructions. * - ON CLUSTER queries - Aiven for ClickHouse doesn't support ON CLUSTER queries because it actually runs each data definition query on all the servers of the cluster without using `ON CLUSTER`. - Run queries without ``ON CLUSTER``. * - Creating a database using SQL - You cannot create a database directly using SQL, for example, if you'd like to add a non-default database. - Use the Aiven's public API. + * - Scaling down the number of nodes + - You can only scale up the number of nodes in a cluster. + - \- Limits ------ diff --git a/docs/products/clickhouse/reference/supported-interfaces-drivers.rst b/docs/products/clickhouse/reference/supported-interfaces-drivers.rst new file mode 100644 index 0000000000..75d5a9b38a --- /dev/null +++ b/docs/products/clickhouse/reference/supported-interfaces-drivers.rst @@ -0,0 +1,50 @@ +Interfaces and drivers supported in Aiven for ClickHouse® +===================================================================== + +Find out what technologies and tools you can use to interact with Aiven for ClickHouse®. + ..
_clickhouse-interfaces: + +Interfaces (protocols) +---------------------- + +Aiven for ClickHouse® supports the following fundamental underlying interfaces (protocols): + +* ``HTTPS`` +* ``Native TCP`` +* ``MySQL Interface`` + +.. note:: + + For security reasons, you must use TLS to connect to Aiven for ClickHouse; plaintext connections are not supported. + +.. topic:: Unsupported interfaces (protocols) + + * `HTTP` + * `gRPC` + * `PostgreSQL` + +.. seealso:: + + For the full list of interfaces and protocols supported in ClickHouse, check out `Drivers and Interfaces `_. + +Drivers (libraries) +------------------- + +There are a number of drivers (libraries) that use one of :ref:`the fundamental underlying interfaces supported in Aiven for ClickHouse ` under the hood. Pick a driver (library) of your choice and use it to connect to your Aiven for ClickHouse service. + +.. seealso:: + + * Check out :doc:`how to use different drivers (libraries) for connecting to Aiven for ClickHouse `. + + * For the full list of drivers and libraries that support connecting to ClickHouse, check out `Drivers and Interfaces `_. + +.. note:: + + You can connect to Aiven for ClickHouse with any driver that uses TLS and one of the supported protocols. + +Related pages +--------------- + +* :doc:`How to connect to Aiven for ClickHouse using different libraries ` +* `Drivers and interfaces supported in ClickHouse `_ diff --git a/docs/products/clickhouse/reference/supported-table-functions.rst b/docs/products/clickhouse/reference/supported-table-functions.rst index befb1b2971..a021f373a5 100644 --- a/docs/products/clickhouse/reference/supported-table-functions.rst +++ b/docs/products/clickhouse/reference/supported-table-functions.rst @@ -10,6 +10,10 @@ Table functions supported in Aiven for ClickHouse® SELECT * FROM deltaLake('s3://bucket/path/to/lake') +.. note:: + + Occasionally, you may find specific table functions disabled for security reasons. + Aiven for ClickHouse® supports the following table functions: * ``cluster`` @@ -17,6 +21,7 @@ Aiven for ClickHouse® supports the following table functions: * ``cosn`` * ``deltaLake`` * ``format`` +* ``gcs`` * ``generateRandom`` * ``hudi`` * ``iceberg`` @@ -26,8 +31,8 @@ Aiven for ClickHouse® supports the following table functions: * ``null`` * ``numbers`` * ``numbers_mt`` +* ``oss`` * ``postgresql`` -* ``remote`` * ``remoteSecure`` * ``s3`` * ``s3Cluster`` diff --git a/docs/products/dragonfly.rst b/docs/products/dragonfly.rst new file mode 100644 index 0000000000..5f5e9bdc3c --- /dev/null +++ b/docs/products/dragonfly.rst @@ -0,0 +1,35 @@ +Aiven for Dragonfly® +==================== + +Aiven for Dragonfly is an advanced, **high-scale, and Redis®* compatible in-memory database service** that can be easily deployed in your preferred cloud environment. It provides lightning-fast data storage and retrieval capabilities, making it ideal for businesses that handle large-scale data operations. + +.. important:: + Aiven for Dragonfly® is currently a :doc:`limited availability ` service. If you are interested in exploring this offering, reach out to our sales team at sales@Aiven.io for more information and access. + +------------------------- + +.. grid:: 1 2 2 2 + + .. grid-item-card:: :doc:`Quickstart ` + :shadow: md + :margin: 2 2 0 0 + + Set up your Aiven for Dragonfly service and learn how to connect to it. + + .. grid-item-card:: :doc:`Overview ` + :shadow: md + :margin: 2 2 0 0 + + Explore the unique features and benefits of Aiven for Dragonfly. + + ..
grid-item-card:: :doc:`Concepts ` + :shadow: md + :margin: 2 2 0 0 + + Understand the core concepts behind Aiven for Dragonfly's technology. + + .. grid-item-card:: :doc:`How-Tos ` + :shadow: md + :margin: 2 2 0 0 + + Discover step-by-step instructions on how to use Aiven for Dragonfly and tips for various use cases. diff --git a/docs/products/dragonfly/concepts.rst b/docs/products/dragonfly/concepts.rst new file mode 100644 index 0000000000..1a290d717e --- /dev/null +++ b/docs/products/dragonfly/concepts.rst @@ -0,0 +1,6 @@ +Concepts +======== + +Learn more about some of the key concepts for working with Aiven for Dragonfly®: + +.. tableofcontents:: diff --git a/docs/products/dragonfly/concepts/ha-dragonfly.rst b/docs/products/dragonfly/concepts/ha-dragonfly.rst new file mode 100644 index 0000000000..205115d623 --- /dev/null +++ b/docs/products/dragonfly/concepts/ha-dragonfly.rst @@ -0,0 +1,73 @@ +High availability in Aiven for Dragonfly® +========================================== + +Aiven for Dragonfly® offers different plans with varying levels of high availability. The available features depend on the selected plan. Refer to the table below for a summary of these plans: + +.. important:: + Aiven for Dragonfly® is currently a :doc:`limited availability ` service. If you are interested in exploring this offering, reach out to our sales team at sales@Aiven.io for more information and access. + + +.. list-table:: + :header-rows: 1 + :widths: 20 20 30 30 10 + + * - Plan + - Node configuration + - High availability & failover features + - Backup features + - Backup history + * - **Startup** + - Single-node + - Limited availability. No automatic failover. + - During limited availability, only the latest snapshot is stored. + - 1 day + * - **Business** + - Two-node (primary + standby) + - High availability with automatic failover to a standby node if the primary fails. + - During limited availability, only the latest snapshot is stored. + - 3 days + * - **Premium** + - Three-node (primary + standby + standby) + - Enhanced high availability with automatic failover among multiple standby nodes if the primary fails. + - During limited availability, only the latest snapshot is stored. + - 13 days + * - **Custom** + - Custom configurations + - Custom high availability and failover features based on user requirements. + - During limited availability, only the latest snapshot is stored. + - Custom based on user requirements + + + +Failure handling +---------------- + +- **Minor failures**: Aiven automatically handles minor failures, such as service process crashes or temporary loss of network access, without any significant changes to the service deployment. In all plans, the service automatically restores regular operation by restarting crashed processes or restoring network access when available. +- **Severe failures**: In case of severe hardware or software problems, such as losing an entire node, more drastic recovery measures are required. Aiven's monitoring infrastructure automatically detects a failing node when it reports problems with its self-diagnostics or stops communicating altogether. The monitoring infrastructure then schedules the creation of a new replacement node. +
.. note:: + + In case of database failover, your service's **Service URI** remains the same; only the IP address changes to point to the new primary node.
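Because clients reach the service through the Service URI's hostname rather than a fixed IP address, a client that resolves the hostname when it connects or reconnects finds the new primary node automatically after a failover. The following minimal sketch illustrates this with the ``redis-py`` library, which also works with Dragonfly's Redis-compatible API; the ``DRAGONFLY_URI`` placeholder and the retry settings are illustrative assumptions rather than required configuration:

.. code:: python

    import redis

    # Connect using the Service URI (hostname), not a cached IP address.
    # The hostname is resolved again whenever a new connection is opened,
    # so the client follows the primary node after a failover.
    r = redis.Redis.from_url(
        'DRAGONFLY_URI',           # replace with your service's Service URI
        socket_connect_timeout=5,  # fail fast if the old primary is gone
        retry_on_timeout=True,     # retry commands that time out during failover
        health_check_interval=30,  # periodically verify the connection is alive
    )

    r.set('failover-test-key', 'hello')
    print(r.get('failover-test-key'))

During the brief failover window, individual commands can still fail with connection errors, so application code should be prepared to retry them.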
+ +High availability for business, premium, and custom plans +------------------------------------------------------------ + +If a standby Dragonfly node fails, the primary node continues running. The system prepares the replacement standby node and synchronizes it with the primary for normal operations to resume. + +In case the primary Dragonfly node fails, the standby node is evaluated for promotion to the new primary based on data from the Aiven monitoring infrastructure. Once promoted, this node starts serving clients, and a new node is scheduled to become the standby. However, during this transition, there may be a brief service interruption. + +If the primary and standby nodes fail simultaneously, new nodes will be created automatically to replace them. However, this may lead to data loss as the primary node is restored from the latest backup. As a result, any database writes made since the last backup could be lost. + +.. note:: + + The duration for replacing a failed node depends mainly on the **cloud region** and the **amount of data** to be restored. For Business, Premium, and Custom plans with multiple nodes, this process is automatic and requires no administrator intervention, but service interruptions may occur during the recreation of nodes. + + +Single-node startup service plans +---------------------------------------------- + +Losing the only node in the service triggers an automatic process of creating a new replacement node. The new node then restores its state from the latest available backup and resumes serving customers. + +Since there was just a single node providing the service, the service will be unavailable for the duration of the restore operation. All the write operations made since the last backup are lost. + + diff --git a/docs/products/dragonfly/concepts/overview.rst b/docs/products/dragonfly/concepts/overview.rst new file mode 100644 index 0000000000..1c3113b26a --- /dev/null +++ b/docs/products/dragonfly/concepts/overview.rst @@ -0,0 +1,41 @@ +Aiven for Dragonfly® overview +================================ + +Aiven for Dragonfly is an advanced, **high-scale, and Redis®* compatible in-memory database service** that can be deployed in your preferred cloud environment. It provides lightning-fast data storage and retrieval capabilities, making it ideal for businesses that handle large-scale data operations. + +.. important:: + Aiven for Dragonfly® is currently a :doc:`limited availability ` service. If you are interested in exploring this offering, reach out to our sales team at sales@Aiven.io for more information and access. + +Dragonfly is designed to overcome the limitations of Redis Open Source Software (Redis OSS), especially under high-load conditions. While renowned for its speed and adaptability as an in-memory data repository, Redis encounters limitations when handling large-scale data management and achieving high throughput. Dragonfly addresses these challenges by enabling vertical scaling, optimizing hardware resources, and supporting larger memory footprints. + +With Aiven for Dragonfly, businesses can handle workloads exceeding 1TB with more than 10 times the throughput performance of Redis OSS, along with reduced latency. This makes it an ideal solution for enterprises with growing data needs. 
+ + +Features and benefits +--------------------- + +Aiven for Dragonfly offers numerous features and benefits: + +* **Redis compatibility at scale:** It is a seamless drop-in replacement for Redis, capable of handling extensive workloads with enhanced performance. + +* **Optimized for large-scale operations:** Dragonfly is specifically built to address the scalability and resource utilization limitations of Redis Open Source Software (Redis OSS). + +* **Advanced performance:** Dragonfly's unique threading model and shared-nothing architecture allow it to scale vertically, enhancing its performance efficiency, especially in environments with heavy data loads. + + +* **Efficient backup and memory management:** Improved snapshot capabilities lead to more efficient memory usage during backups. + +* **High availability and replication:** It includes active-passive replication and persistence capabilities, ensuring data reliability and consistency. + +* **Ease of integration:** Dragonfly integrates smoothly with existing systems, requiring no code changes, simplifying the adoption process. + +Use cases +--------------------- + +* **Data-intensive enterprises:** Ideal for businesses that require robust, high-performance in-memory data storage and processing capabilities. + +* **Scaling and performance needs:** Perfectly suited for situations where the need for greater scalability and higher throughput goes beyond what Redis OSS can handle. + +Related pages +---------------- +* For detailed information about Dragonfly, refer to `Dragonfly documentation `_. diff --git a/docs/products/dragonfly/get-started.rst b/docs/products/dragonfly/get-started.rst new file mode 100644 index 0000000000..e24fb78d04 --- /dev/null +++ b/docs/products/dragonfly/get-started.rst @@ -0,0 +1,59 @@ +Get started with Aiven for Dragonfly® +======================================= + +The first step in using Aiven for Dragonfly is to create a service. You can do so either using the `Aiven Console `_ or the `Aiven CLI `_. + +.. important:: + Aiven for Dragonfly® is currently a :doc:`limited availability ` service. If you are interested in exploring this offering, reach out to our sales team at sales@Aiven.io for more information and access. + +Create a service using the Aiven Console +---------------------------------------------- +1. Log in to the `Aiven Console `_. + +2. Follow :doc:`these instructions ` to create a new Dragonfly service. + +Once the service is ready, the status changes to *Running*. Depending on your selected cloud provider and region, this generally takes a couple of minutes. + + +Create a service using the Aiven CLI +------------------------------------------------ + +`Aiven CLI `_ provides a simple and efficient way to create an Aiven for Dragonfly® service. If you prefer launching a new service from the CLI, follow these steps: + +1. Determine the service plan, cloud provider, and region you want to use for your Dragonfly service. +2. Run the following command to create a Dragonfly service named ``dragonfly-demo``: + +.. code:: + + avn service create dragonfly-demo \ + --service-type dragonfly \ + --cloud google-europe-north1 \ + --plan startup-4 \ + --project dev-sandbox + +..
note:: + There are additional options available to you, which you can view by running the following commands: + + * For a full list of default flags: ``avn service create -h`` + * For type-specific options: ``avn service types -v`` + + +Connect to Aiven for Dragonfly +------------------------------- + +Learn how to connect to Aiven for Dragonfly using different programming languages: + +* :doc:`redis-cli ` +* :doc:`Go ` +* :doc:`Node ` +* :doc:`Python ` + + +Explore other resources +------------------------ + +* Learn about how Aiven for Dragonfly supports :doc:`high availability `. +* Migrate data from :doc:`Aiven for Redis®* to Aiven for Dragonfly `. +* Migrate data from :doc:`external Dragonfly to Aiven for Dragonfly `. + + diff --git a/docs/products/dragonfly/howto.rst b/docs/products/dragonfly/howto.rst new file mode 100644 index 0000000000..9db2f1706e --- /dev/null +++ b/docs/products/dragonfly/howto.rst @@ -0,0 +1,7 @@ +HowTo +===== + +Check out the common tasks for working with Aiven for Dragonfly®: + +.. tableofcontents:: + diff --git a/docs/products/dragonfly/howto/connect-go.rst b/docs/products/dragonfly/howto/connect-go.rst new file mode 100644 index 0000000000..85f7b9f091 --- /dev/null +++ b/docs/products/dragonfly/howto/connect-go.rst @@ -0,0 +1,79 @@ +Connect with Go +================ + +This example demonstrates how to connect to Dragonfly® using Go, using the ``go-redis/redis`` library, which is officially supported with Dragonfly. For more information, see `Dragonfly SDKs `_. + +Variables +---------- + +These are the placeholders you will need to replace in the code sample: + +================== ============================================================= +Variable Description +================== ============================================================= +``DRAGONFLY_URI`` URL for the Dragonfly® connection +================== ============================================================= + +Pre-requisites +--------------- + +First, install the ``go-redis/redis`` library: + +.. code:: + + go get github.com/go-redis/redis/v8 + +Code +------- +Create a new file named ``main.go`` and add the following content, replacing the ``DRAGONFLY_URI`` placeholder with your Dragonfly instance's connection URI: + +.. code:: go + + package main + + import ( + "context" + "fmt" + "github.com/go-redis/redis/v8" + ) + + var ctx = context.Background() + + func main() { + dragonflyURI := "DRAGONFLY_URI" + + opts, err := redis.ParseURL(dragonflyURI) + if err != nil { + panic(err) + } + + rdb := redis.NewClient(opts) + + err = rdb.Set(ctx, "key", "hello world", 0).Err() + if err != nil { + panic(err) + } + + val, err := rdb.Get(ctx, "key").Result() + if err != nil { + panic(err) + } + fmt.Println("The value of key is:", val) + } + +This code connects to Dragonfly, sets a key named ``key`` with the value ``hello world`` (with no expiration), and then retrieves and prints the value of this key. + +Run the code +-------------- + +To run the code, use the following command in your terminal: + +.. code:: + + go run main.go + +If everything is set up correctly, the output should be: + +.. 
code:: + + The value of key is: hello world diff --git a/docs/products/dragonfly/howto/connect-node.rst b/docs/products/dragonfly/howto/connect-node.rst new file mode 100644 index 0000000000..a430aa47eb --- /dev/null +++ b/docs/products/dragonfly/howto/connect-node.rst @@ -0,0 +1,62 @@ +Connect with NodeJS +===================== + +This example demonstrates how to connect to Dragonfly® from NodeJS using the ``ioredis`` library, which is officially supported and compatible with Dragonfly. For more information, see `Dragonfly SDKs `_. + +Variables +---------- + +Replace the following placeholders in the code sample with the appropriate values: + +================== ============================================================= +Variable Description +================== ============================================================= +``DRAGONFLY_URI`` URL for the Dragonfly connection +================== ============================================================= + +Pre-requisites +--------------- + +Install the ``ioredis`` library: + +.. code:: + + npm install --save ioredis + +Code +----- + +Create a new file named ``index.js``, add the following content and replace the ``DRAGONFLY_URI`` placeholder with your Dragonfly instance's connection URI: + +.. code:: + + const Redis = require('ioredis'); + + const redis = new Redis('DRAGONFLY_URI'); // Replace with your Dragonfly URI + + redis.set('key', 'hello world').then(() => { + return redis.get('key'); + }).then((value) => { + console.log('The value of key is:', value); + process.exit(); + }).catch((err) => { + console.error('Error:', err); + process.exit(1); + }); + +This code connects to Dragonfly, sets a key named ``key`` with the value ``hello world`` (without expiration), then retrieves and prints the value of this key. + +Run the code +-------------- + +To execute the code, use the following command in your terminal: + +.. code:: + + node index.js + +If everything is set up correctly, the output should be: + +.. code:: + + The value of key is: hello world diff --git a/docs/products/dragonfly/howto/connect-python.rst b/docs/products/dragonfly/howto/connect-python.rst new file mode 100644 index 0000000000..36c92f91d6 --- /dev/null +++ b/docs/products/dragonfly/howto/connect-python.rst @@ -0,0 +1,61 @@ +Connect with Python +==================== + +This example demonstrates how to connect to Dragonfly® using Python, using the ``redis-py`` library, which is officially supported by Dragonfly. For more information, see `Dragonfly SDKs `_. + +Variables +----------- + +Replace the following placeholders in the code sample with the appropriate values: + +================== ============================================================= +Variable Description +================== ============================================================= +``DRAGONFLY_URI`` URL for the Dragonfly connection +================== ============================================================= + +Pre-requisites +---------------- + +Install the ``redis-py`` library: + +.. code:: + + pip install redis + +Code +----- + +Create a new file named ``main.py``, add the following content and replace the ``DRAGONFLY_URI`` placeholder with your Dragonfly instance's connection URI: + +.. 
code:: + + import redis + + # Replace with your Dragonfly URI + r = redis.Redis.from_url('DRAGONFLY_URI') + + r.set('key', 'hello world') + value = r.get('key') + print(f'The value of key is: {value.decode()}') + +This code connects to Dragonfly, sets a key named ``key`` with the value ``hello world`` (without expiration), and then retrieves and prints the value of this key. + +Run the code +-------------- + +To execute the code, use the following command in your terminal: + +.. code:: + + python main.py + +.. note:: + + On some systems, you may need to use ``python3`` instead of ``python`` to invoke Python 3. + +If everything is set up correctly, the output should be: + +.. code:: + + The value of key is: hello world diff --git a/docs/products/dragonfly/howto/connect-redis-cli.rst b/docs/products/dragonfly/howto/connect-redis-cli.rst new file mode 100644 index 0000000000..08e5bf1372 --- /dev/null +++ b/docs/products/dragonfly/howto/connect-redis-cli.rst @@ -0,0 +1,65 @@ +Connect with ``redis-cli`` +=========================== + +This example demonstrates how to connect to Dragonfly® using ``redis-cli``, which supports nearly all the same commands for Dragonfly as it does for Redis®. For more information, see `Dragonfly CLI `_. + +Variables +----------- + +Replace the following placeholders in the code sample with the appropriate values: + +================== ============================================================= +Variable Description +================== ============================================================= +``DRAGONFLY_URI`` URL for the Dragonfly connection +================== ============================================================= + +Pre-requisites +------------------ + +Ensure the following before proceeding: + +1. The ``redis-cli`` client is installed. This can be installed as part of the Redis®* server installation or as a standalone client. Refer to the `Redis Installation Guide `_ for more information. + +Code +----- + +To connect to Dragonfly, execute the following command in a terminal window: + +.. code:: + + redis-cli -u DRAGONFLY_URI + +This command connects you to your Dragonfly instance. + +To verify the connection is successful, use the ``INFO`` command: + +.. code:: + + INFO + +This command should return various Dragonfly parameters similar to Redis: + +.. code:: text + + # Server + dragonfly_version:1.0.0 + dragonfly_git_sha1:0a1b2c3d + dragonfly_mode:standalone + ... + +To set a key, use the following command: + +.. code:: + + SET mykey mykeyvalue123 + +This should return a confirmation ``OK``. + +To retrieve the set key value, use: + +.. code:: + + GET mykey + +The output will be the value of the key, in this case, ``"mykeyvalue123"``. diff --git a/docs/products/dragonfly/howto/list-code-samples.rst b/docs/products/dragonfly/howto/list-code-samples.rst new file mode 100644 index 0000000000..d933af4383 --- /dev/null +++ b/docs/products/dragonfly/howto/list-code-samples.rst @@ -0,0 +1,6 @@ +Connect to Aiven for Dragonfly® +================================= + +Connect to Aiven for Dragonfly® using various programming languages. + +.. tableofcontents:: diff --git a/docs/products/dragonfly/howto/list-migrate-data.rst b/docs/products/dragonfly/howto/list-migrate-data.rst new file mode 100644 index 0000000000..9f4f784c10 --- /dev/null +++ b/docs/products/dragonfly/howto/list-migrate-data.rst @@ -0,0 +1,4 @@ +Data migration +============== + +..
tableofcontents:: diff --git a/docs/products/dragonfly/howto/migrate-aiven-redis-df-console.rst b/docs/products/dragonfly/howto/migrate-aiven-redis-df-console.rst new file mode 100644 index 0000000000..fe1f8ab089 --- /dev/null +++ b/docs/products/dragonfly/howto/migrate-aiven-redis-df-console.rst @@ -0,0 +1,112 @@ +Migrate Aiven for Redis®* to Aiven for Dragonfly® +============================================================== + +Transition Aiven for Redis® databases seamlessly to Aiven for Dragonfly using the `Aiven Console `_. This article provides detailed instructions for the migration process. + +The Aiven Console migration tool simplifies the process of migrating databases to the Aiven for Dragonfly managed service. + +Compatibility overview +----------------------- +Before migrating an Aiven for Redis database to Aiven for Dragonfly, carefully review your current Redis setup. + +* **Review database setup:** Examine your Redis database's data structures, storage patterns, and configurations. Identify any unique features, custom settings, and specific configurations. + +* **API compatibility:** While Dragonfly closely mirrors Redis API commands, there may be variations, especially with newer versions of Redis. For detailed insights on command compatibility, refer to the `Dragonfly API compatibility documentation `_. + + +Prerequisites +------------------------------------------- +Before starting the migration from an Aiven for Redis service: + +* Confirm the Aiven for Redis service is accessible over the Internet. For more information, see :doc:`Public internet access `. +* Make a note of the Aiven project and Aiven for Redis service names for migration in the Aiven Console. + +The Aiven Console migration tool automatically uses connection details like the hostname, port, and credentials linked to the selected Aiven for Redis service. + + +Database migration steps +-------------------------- + +1. Log in to the `Aiven Console `_ and select the Aiven for Dragonfly service to which you want to migrate your Redis database. +2. Navigate to **Service settings** from the sidebar. +3. Scroll to the **Service management** section and use the ellipsis to view additional menu options. +4. Select **Import database** to initiate the import process. + + + +Step 1: Configure +''''''''''''''''''' +Begin the migration process by selecting **Import an Aiven for Redis service**: + +1. From the drop-down menu, select your project name. +2. From the subsequent drop-down, select the Aiven for Redis database you intend to migrate. +3. Click **Get started** to proceed with the migration. + + +Step 2: Validation +'''''''''''''''''''''' +The `Aiven Console `_ will automatically attempt to validate the database configurations for the selected Aiven for Redis service. Click **Run validation** to validate the connection. + +.. warning:: + + If a validation error occurs during migration, follow the on-screen instructions to fix it. Rerun validation to ensure the database meets migration criteria. Note that the migration doesn't include service user accounts or commands in progress. + + +Step 3: Migration +'''''''''''''''''''' +Once all the necessary checks have been completed successfully, you can proceed with the migration process. + +* Click **Start migration** to initiate the data migration process to Aiven for Dragonfly.
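While the migration runs, the target service receives data from the source. As an optional sanity check that is not part of the migration wizard, you can spot-check a few keys on both services with any Redis-compatible client. The minimal sketch below uses the ``redis-py`` library; the ``SOURCE_REDIS_URI`` and ``TARGET_DRAGONFLY_URI`` placeholders are illustrative and must be replaced with the service URIs of your own source and target services:

.. code:: python

    import redis

    # Replace the placeholders with the service URIs of your source and target services.
    source = redis.Redis.from_url('SOURCE_REDIS_URI')
    target = redis.Redis.from_url('TARGET_DRAGONFLY_URI')

    # Compare a small sample of string keys. The target is read-only while the
    # migration is in progress, so reading from it is safe.
    for key in source.scan_iter(count=100):
        if source.type(key) == b'string' and source.get(key) != target.get(key):
            print('Mismatch (possibly not replicated yet):', key)

    print('Spot check finished.')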
+
+
+
+Step 4: Replication
+''''''''''''''''''''
+
+While the migration is in progress:
+
+* You can close the migration wizard by clicking **Close window** and return later to check the progress. You can keep track of the migration progress by checking the service overview page.
+* To stop the migration, click **Stop migration**. This action preserves the data already migrated to Aiven.
+
+  .. important::
+
+     To prevent conflicts during replication:
+
+     * The target database will be in a read-only state during migration. Writing to the database is only possible once the migration is stopped.
+     * Do not manually change the replication settings of the source database.
+     * Avoid making network or configuration changes that could disrupt the ongoing connection between the source and target databases, such as modifying firewall rules or altering trusted sources.
+
+  .. note::
+
+     If the migration fails, investigate and resolve the issue, then restart the migration using **Start over**.
+
+
+
+Step 5: Close and post-migration steps
+'''''''''''''''''''''''''''''''''''''''''
+
+Upon successful migration:
+
+* **Stop replication**: Choose this option if no further synchronization is required and you are ready to switch to Aiven for Dragonfly after thoroughly testing the service.
+
+* **Keep replicating**: Choose this option if you need ongoing data synchronization between the source database and Aiven for Dragonfly.
+
+.. warning::
+
+   Avoid system updates or configuration changes during active replication to prevent unintentional migrations.
+
+
+.. topic:: Replication mode active?
+
+   Your data is now synchronized to Aiven for Dragonfly, with new writes to the source database being continuously synced.
+
+
+Related pages
+---------------
+
+* :doc:`Aiven for Redis®* documentation `
+* :doc:`Aiven for Dragonfly documentation `
+
+
+
+
diff --git a/docs/products/dragonfly/howto/migrate-ext-redis-df-console.rst b/docs/products/dragonfly/howto/migrate-ext-redis-df-console.rst
new file mode 100644
index 0000000000..9229169b09
--- /dev/null
+++ b/docs/products/dragonfly/howto/migrate-ext-redis-df-console.rst
@@ -0,0 +1,116 @@
+Migrate external Redis®* databases to Aiven for Dragonfly®
+===========================================================================
+
+Redis is an in-memory data structure store commonly used as a database, cache, message broker, and queue. The Aiven Console provides an intuitive wizard to facilitate the migration of your external Redis databases to Aiven for Dragonfly.
+
+.. important::
+   The migration of databases from Google Cloud Memorystore for Redis is not supported at this time.
+
+Compatibility overview
+-----------------------
+Before migrating an external Redis database to Aiven for Dragonfly, carefully review your current Redis setup.
+
+* **Review database setup:** Examine your Redis database's data structures, storage patterns, and configurations. Identify any unique features, custom settings, and specific configurations.
+
+* **API compatibility:** While Dragonfly closely mirrors Redis API commands, there may be variations, especially with newer versions of Redis. For detailed insights on command compatibility, refer to the `Dragonfly API compatibility documentation `_.
+
+
+
+Prerequisites
+-------------
+
+Before starting the migration process, ensure you have the following:
+
+* A target Aiven for Dragonfly service set up and ready. For setup instructions, see :doc:`/docs/products/dragonfly/get-started`.
+* Source database information: + + * **Hostname or connection string:** The public hostname, connection string, or IP address used to connect to the database, which should be :doc:`accessible from the public Internet `. + * **Port:** The port number used for connecting to the database. + * **Username:** The username with appropriate permissions for accessing the database data you intend to migrate. + * **Password:** The password associated with the username. + +* Ensure firewalls allow traffic between databases or disable them temporarily. +* Using an SSL-secured connection for data transfer is highly recommended during the source Redis database migration. +* If the source Redis service is not publicly accessible, establish a VPC peering connection between the private networks. You will need the VPC ID and cloud name for the migration. + +.. note:: + Instances such as AWS ElastiCache for Redis that do not have public IP addresses will require a VPC and peering connection to establish a migration. + + +Database migration steps +-------------------------- + +To migrate a Redis database to Aiven for Dragonfly: + +1. Log in to the `Aiven Console `_ and select the Aiven for Dragonfly service to which you want to migrate your Redis database. +2. Navigate to **Service settings** from the sidebar. +3. Scroll to the **Service management** section and use the ellipsis to view additional menu options. +4. Select **Import database** to initiate the import process. + + +Step 1: Configure +`````````````````` +Start by reviewing the database migration configuration guidelines. Confirm compatibility with Dragonfly and proceed as follows: + +* Select **Import an external Redis database**. +* Click **Get started** to begin the migration. + +Step 2: Validation +```````````````````` +Enter the required details to establish a connection with your source Redis database: + +* **Hostname:** The public hostname, connection string, or IP address for the database connection. +* **Port:** The port number used for connections. +* **Username:** The username required to access your database. +* **Password:** The password for database access. +* Select the SSL encryption option for a secure migration and click **Run check** to verify the connection. + +.. important:: + Address any issues that arise to ensure a smooth migration. Note that not all components of your Redis setup will be migrated. User accounts, ACLs, configurations, and active commands or scripts will not be transferred, but all database data and its content will be. + +Step 3: Migration +```````````````````` + +Once all the necessary checks have been completed successfully, you can proceed with the migration process. + +* Select **Start migration** to initiate the data migration process to Aiven for Dragonfly. + +Migration in progress +''''''''''''''''''''''' + +During the migration, you can: + +* Close the migration wizard by clicking **Close window** and later return to monitor the migration status from the service overview page. +* The duration of the migration depends on the size of your database. During migration, the target database will be in a read-only state. Writing to the database is only possible once the migration is stopped. +* Certain managed database features will be disabled while the migration is in progress. +* If needed, halt the migration by selecting **Stop migration**. Previously migrated data will remain on Aiven. + +.. 
warning:: + + * Stopping this migration will immediately halt the ongoing replication process, preserving the data already transferred to Aiven. You have the option to initiate a new database migration at any time in the future, which will overwrite the entire database and its contents on Aiven with the latest data from the source. + * Avoid actions that could disrupt the replication process, such as changing replication configurations or firewall settings. + +Step 4 - Close and post-migration steps +`````````````````````````````````````````` +Once the migration is complete: + +* Click **Close connection** to end replication. +* Click **Keep replicating** to maintain ongoing data synchronization. + +.. warning:: + + System updates or any configuration changes during replication may restart nodes and trigger a new database migration. Before making any modifications, confirm that replication is either complete or stopped. + +.. topic:: Replication Mode Active? + + Newly added data to the original Redis database will continue to sync with your Aiven for Dragonfly service until you decide to stop replication. + + + + +Related pages +--------------- +* Migrating to Aiven for Dragonfly +* Aiven for Dragonfly documentation ` + + diff --git a/docs/products/dragonfly/reference.rst b/docs/products/dragonfly/reference.rst new file mode 100644 index 0000000000..16914af46f --- /dev/null +++ b/docs/products/dragonfly/reference.rst @@ -0,0 +1,6 @@ +Aiven for Dragonfly® reference +===================================== + +Additional reference information for Aiven for Dragonfly®: + +.. tableofcontents:: diff --git a/docs/products/dragonfly/reference/advanced-params.rst b/docs/products/dragonfly/reference/advanced-params.rst new file mode 100644 index 0000000000..2131883a93 --- /dev/null +++ b/docs/products/dragonfly/reference/advanced-params.rst @@ -0,0 +1,6 @@ +Advanced parameters for Aiven for Dragonfly® +=============================================== + +Below you can find a summary of every configuration option available for Aiven for Dragonfly® service: + +.. include:: /includes/config-dragonfly.rst \ No newline at end of file diff --git a/docs/products/flink.rst b/docs/products/flink.rst index 31d57263e5..abf3fe8366 100644 --- a/docs/products/flink.rst +++ b/docs/products/flink.rst @@ -5,7 +5,7 @@ Aiven for Apache Flink® is a fully managed service that leverages the power of .. grid:: 1 2 2 2 - .. grid-item-card:: :doc:`Quickstart ` + .. grid-item-card:: :doc:`Quickstart ` :shadow: md :margin: 2 2 0 0 diff --git a/docs/products/flink/getting-started.rst b/docs/products/flink/get-started.rst similarity index 100% rename from docs/products/flink/getting-started.rst rename to docs/products/flink/get-started.rst diff --git a/docs/products/flink/howto/create-flink-applications.rst b/docs/products/flink/howto/create-flink-applications.rst index 9520b1de65..33315aa0e2 100644 --- a/docs/products/flink/howto/create-flink-applications.rst +++ b/docs/products/flink/howto/create-flink-applications.rst @@ -1,61 +1,16 @@ -Create an Aiven for Apache Flink® application -============================================== - -:doc:`Aiven for Flink applications ` in Aiven for Apache Flink® servers as a container that includes everything connected to a Flink job, including source and sink connections and data processing logic. 
The `Aiven Console `_ provides a guided wizard to help you build and deploy applications, simplifying the process of selecting the source and sink tables, writing data transformation statements, and validating and ingesting data through the interactive query feature. - -This article provides the information required to build and deploy applications on Aiven for Apache Flink service. - -.. note:: - You must set up the :doc:`data service integration ` before building applications. - - -Create an application via the Aiven console --------------------------------------------- - -Follow these steps to build your first Aiven for Flink application: - -1. In the `Aiven Console `_, open the Aiven for Apache Flink service for which you want to create an application. -2. Select **Applications** from the left sidebar and select **Create application** to create your first application. -3. In the **Create new application** screen, enter the name of your application and configure the necessary deployment settings. select **Create application**. -4. Select **Create first version** to create the first version of the application. -5. Select **Add your first source table** to add a source table. - - .. note:: - Since this is your first application, there are currently no other applications where you can import source tables. - -6. In the **Add new source table** screen, - - * Select the **Integrated service** from the drop-down list. - * In the **Table SQL** section, enter the statement that will be used to create the table. - * Optionally, select **Run** to view how data is being pulled from the data source. This could take some time based on the data and the connection. - * Select **Add table**. -7. Select **Next** to add the sink table, and then select **Add your first sink table**. - - .. note:: - Since this is your first application, there are currently no other applications where you can import sink tables. - -8. In the **Add new sink table** screen, - - * Select **Integrated service** from the drop-down list. - * In the **Table SQL** section, enter the statement that will be used to create the table. - * Select **Add table**. -9. Select **Next** to enter the **SQL statement** that transforms the data from the source stream. Optionally, select **Run** to view how data is being pulled from the data source. -10. Select **Save and deploy later** to save the application. You will see the application on the landing page that provides you with an overview of your application. - - .. image:: /images/products/flink/application_landingpage_view.png - :scale: 50 % - :alt: Application landing page with an view of source table, SQL statement and sink table - -11. Select **Create deployment**. On the **Create new deployment** screen, - - * Choose the version you wish to deploy. The default version for the first deployment is **Version: 1**. - * Choose the :doc:`savepoint ` from where you want to deploy. No savepoints are available for the first application deployment. - * Use the toggle for **Restart on failure** to enable or disable the option of automatically restarting a Flink job in case it fails. - * Enter the number of `parallel instances `_ you want to have for the task. -12. Select **Deploy without a savepoint** to deploy your application. -13. The deployment status will show **Initializing: version 1** and then **Running: version 1**. - -Your first application is now created and deployed, and you can view the data related to the actions the application needs to perform in your sink source. 
+Create and manage Aiven for Apache Flink® applications
+===========================================================
+
+:doc:`Aiven for Flink applications ` in Aiven for Apache Flink® serves as a container that includes everything connected to a Flink job, including source and sink connections and data processing logic.
+
+Using the `Aiven Console `_, you can create applications that run SQL queries or deploy custom JARs, catering to diverse data processing requirements. The console's guided wizard simplifies the application configuration process, from selecting source and sink tables for SQL applications to uploading and managing JAR files for custom job execution.
+
+.. important::
+
+   Custom JARs for Aiven for Apache Flink is a :doc:`limited availability feature `. If you're interested in trying out this feature, contact the sales team at sales@Aiven.io.
+
+.. tableofcontents::
+
diff --git a/docs/products/flink/howto/create-jar-application.rst b/docs/products/flink/howto/create-jar-application.rst
new file mode 100644
index 0000000000..a34bf29958
--- /dev/null
+++ b/docs/products/flink/howto/create-jar-application.rst
@@ -0,0 +1,49 @@
+Create a JAR application
+========================
+Aiven for Apache Flink® enables you to upload and deploy custom code as a JAR file, enhancing your Flink applications with advanced data processing capabilities.
+
+.. important::
+
+   Custom JARs for Aiven for Apache Flink is a :doc:`limited availability feature `. If you're interested in trying out this feature, contact the sales team at sales@Aiven.io.
+
+Prerequisite
+------------
+
+* To enable custom JARs for a new Aiven for Apache Flink service, toggle the feature during service creation.
+* For an existing service, in the `Aiven Console `_, select your project and then choose your Aiven for Apache Flink® service.
+
+  * Click **Service settings** on the left sidebar.
+  * Scroll to the **Advanced configuration** section, and click **Configure**.
+  * In the **Advanced configuration** screen, click **Add configuration options**, then use the search box to find ``custom_code`` and set it to the **Enabled** position.
+
+Create and deploy application
+---------------------------------
+
+1. Access the `Aiven Console `_ and select the Aiven for Apache Flink service where you want to deploy a JAR application.
+2. From the left sidebar, click **Applications** and then click **Create application**.
+3. In the **Create application** dialog, enter a name for your JAR application, and select **JAR** as the application type from the drop-down.
+4. Click **Create application** to proceed.
+5. Click **Upload first version** to upload the first version of the application.
+6. In the **Upload new version** dialog:
+
+   * Click **Choose file** to select your custom JAR file.
+   * Select the **Terms of Service** checkbox to indicate your agreement.
+   * Click **Upload version** to upload your JAR file.
+
+7. After the upload, you are redirected to the application's overview page.
+8. To deploy the application, click **Create deployment**. In the **Create new deployment** dialog:
+
+   * Select the application version to deploy.
+   * Select a :doc:`savepoint ` if you wish to deploy from a specific state. No savepoints are available for the first application deployment.
+   * Toggle **Restart on failure** to automatically restart Flink jobs upon failure.
+   * In the **Program args** field, provide command-line arguments consisting of variables and configurations relevant to your application's logic upon submission.
Each argument is limited to 64 characters, with a total limit of 32 separate items. + * Specify the number of `parallel instances `_ you require for the task. + +9. Click **Deploy without a savepoint** to begin the deployment process. +10. While deploying, the application status shows **Initializing**. Once deployed, the status changes to **Running**. + + +Related pages +-------------- + +* :doc:`Manage Aiven for Apache Flink® applications ` diff --git a/docs/products/flink/howto/create-sql-application.rst b/docs/products/flink/howto/create-sql-application.rst new file mode 100644 index 0000000000..72f37aa9db --- /dev/null +++ b/docs/products/flink/howto/create-sql-application.rst @@ -0,0 +1,61 @@ +Create an SQL application +============================ + +In Aiven for Apache Flink®, you can create an SQL application that uses Apache Flink SQL to streamline the process of building data processing pipelines. The SQL application simplifies the process of defining source and sink tables, implementing data processing logic, and managing application deployment. + +Prerequisite +------------ +Before creating applications, configure the :doc:`data service integration ` for seamless integration and data management within your Flink applications. + +Create and deploy application +------------------------------- + +Create an SQL application in Aiven for Apache Flink® using the `Aiven Console `_: + +1. In the `Aiven Console `_, select the Aiven for Apache Flink service where you want to create and deploy a Flink application. +2. From the left sidebar, click **Applications** and then click **Create application**. +3. In the **Create application** dialog, enter the name of your application and select **SQL** as the application type. +4. Click **Create application**. +5. Click **Create first version** to create the first version of the application. +6. Click **Add your first source table** to add a source table. + + .. note:: + As this is your first application, no other applications are available to import source tables. + +7. On the **Add new source table** screen: + + * Use the **Integrated service** drop-down to select the service. + * In the **Table SQL** section, enter the SQL statement to create the source table. + * Optionally, click **Run** to test how data is being retrieved from the data source. This may vary in time based on the data volume and connection speed. + * Click **Add table**. +8. Click **Next** to proceed to adding a sink table and click **Add your first sink table**. + + .. note:: + As this is your first application, no other applications are available to import sink tables. + +9. On the **Add new sink table** screen: + + * Use the **Integrated service** drop-down to select the service. + * In the **Table SQL** section, enter the SQL statement to create the sink table. + * Click **Add table**. +10. Click **Next** to enter the **SQL statement** that transforms the data from the source stream. Optionally, click **Run** to see how the data is extracted from the source. + +11. Click **Save and deploy later** to save the application. You can view and access the application you created on the application overview page. + + .. image:: /images/products/flink/application_landingpage_view.png + :scale: 50 % + :alt: Application landing page with a view of the source table, SQL statement, and sink table + +12. To deploy the application, click **Create deployment**. In the **Create new deployment** dialog: + + * Select the application version to deploy. 
The default version for the first deployment is **Version: 1**. + * Select a :doc:`savepoint ` if you wish to deploy from a specific state. No savepoints are available for the first application deployment. + * Toggle **Restart on failure** to automatically restart Flink jobs upon failure. + * Specify the number of `parallel instances ` you require for the task. +13. Click **Deploy without a savepoint** to begin the deployment process. +14. While deploying, the application status shows **Initializing**. Once deployed, the status changes to **Running**. + +Create SQL applications using Aiven CLI +------------------------------------------ +For information on creating and managing Aiven for Apache Flink application using :doc:`Aiven CLI `, see :doc:`Manage Aiven for Apache Flink® applications ` document. + diff --git a/docs/products/flink/howto/flink-confluent-avro.rst b/docs/products/flink/howto/flink-confluent-avro.rst index 1e53fd0927..b190132267 100644 --- a/docs/products/flink/howto/flink-confluent-avro.rst +++ b/docs/products/flink/howto/flink-confluent-avro.rst @@ -12,7 +12,7 @@ Prerequisites -------------- * :doc:`Aiven for Apache Flink service ` with Aiven for Apache Kafka® integration. See :doc:`/docs/products/flink/howto/create-integration` for more information. -* Aiven for Apache Kafka® service with Karapace Schema registry enabled. See :doc:`/docs/products/kafka/karapace/getting-started` for more information. +* Aiven for Apache Kafka® service with Karapace Schema registry enabled. See :doc:`/docs/products/kafka/karapace/get-started` for more information. * By default, Flink cannot create Apache Kafka topics while pushing the first record automatically. To change this behavior, enable in the Aiven for Apache Kafka target service the ``kafka.auto_create_topics_enable`` option in **Advanced configuration** section. Create an Apache Flink® table with Confluent Avro diff --git a/docs/products/flink/howto/list-flink-applications.rst b/docs/products/flink/howto/list-flink-applications.rst deleted file mode 100644 index d228344516..0000000000 --- a/docs/products/flink/howto/list-flink-applications.rst +++ /dev/null @@ -1,6 +0,0 @@ -Create and manage Aiven for Apache Flink® applications -====================================================== - -This section will walk you through the steps of setting up and managing your Aiven for Apache Flink® applications. You will learn how to create, configure, and manage your Apache Flink applications. - -.. tableofcontents:: diff --git a/docs/products/flink/howto/manage-flink-applications.rst b/docs/products/flink/howto/manage-flink-applications.rst index d3ab510eae..cd9afb5391 100644 --- a/docs/products/flink/howto/manage-flink-applications.rst +++ b/docs/products/flink/howto/manage-flink-applications.rst @@ -8,21 +8,34 @@ Creating a new version of an application To create a new version of the application deployed, follow these steps: 1. Log in to the `Aiven Console `_, and select your Aiven for Apache Flink® service. -2. Select **Applications** from the left sidebar, and select the application that contains the deployment you want to create a new version. -3. On the application landing screen, select **Create new version**. -4. In the create new version screen, make any desired changes to the create statement, source, or sink tables. -5. Select **Save and deploy later**. You can see the new version listed in the versions drop-down list on the left side of the screen. -6. 
To deploy the new version of the application, :ref:`stop ` any existing version that is running. -7. Select **Create deployment**, and on the **Create new deployment** screen: - - * Choose the version you want to deploy. - * Choose the savepoint from where you want to deploy. - * Use the toggle for **Restart on failure** to enable or disable the option of automatically restarting a Flink job in case it fails. +2. From the left sidebar, select **Applications**. +3. On the **Applications** landing page, click on the application name for which you want to create a new version. + +For SQL application +````````````````````` +1. Click **Create new version**. +2. In the **Create new version** page, modify the create statement, source, or sink tables as needed. +3. Click **Save and deploy later**. You can see the new version listed in the versions drop-down list. +4. To deploy the new version of the application, :ref:`stop ` any existing version that is running. +5. Click **Create deployment**, and in the **Create new deployment** dialog: + + * Select the version you want to deploy. + * Select the savepoint from where you want to deploy. + * Toggle **Restart on failure** to automatically restart Flink jobs upon failure. * Enter the number of `parallel instances `_ you want to have for the task. + * Click **Deploy from a savepoint** or **Deploy without savepoint** depending on your previous selection. -8. Select **Deploy from a savepoint** or **Deploy without savepoint** depending on your previous selection. +For JAR application +````````````````````` +1. Click **Upload new version**. +2. In the **Upload new version** dialog: + * Click **Choose file** to select your custom JAR file. + * Review and accept the terms of service by checking the box. + * Click **Upload version** to upload your JAR file. +3. In the **Deployment history** you can see the latest version running. + .. _stop-flink-application: Stop application deployment @@ -30,26 +43,24 @@ Stop application deployment To stop a deployment for your Flink application, follow these steps: -1. Select **Applications** from the left sidebar on your Aiven for Apache Flink service, and select the application that contains the deployment you want to stop. -2. On the application landing screen, select **Stop deployment**. -3. On the **Stop deployment** screen, enable the option to **Create a savepoint before stopping** to preserve the current state of the application. - -.. note:: - To stop a deployment without saving the current state of the application, disable the option for **Create a savepoint before stopping** and select **Stop without savepoint**. - -4. Select **Create savepoint & stop** to initiate the stopping process. +1. In your Aiven for Apache Flink service, select **Applications** from the left sidebar. +2. On the **Applications** landing page, click on the application name you want to stop. +3. In the application's overview page, click **Stop deployment**. +4. In the **Stop deployment** dialog, enable the option to **Create a savepoint before stopping** to save the current state of the application. If you want to stop a deployment without saving the current state of the application, disable the option for **Create a savepoint before stopping** and click **Stop without creating savepoint**. +5. Click **Create savepoint & stop** to initiate the stopping process. The application status will display ``Saving_and_stop_requested`` and then ``Finished`` once the stopping process is completed. 
-Additionally, you can view a history of all the application deployments and statuses by selecting **Deployment history**. +Additionally, the **Deployment history** provides a record of all the application deployments and statuses. Rename application ------------------- To rename an application, follow these steps: -1. Select **Applications** from the left sidebar on your Aiven for Apache Flink service, and select the application you want to rename. -2. On the application landing screen, select the **Application action menu** (ellipsis) located on the right side of the screen, and select **Update application** from the menu options. -3. In the **Update Application** screen, enter the new name for the application and select **Save changes** to confirm the new name and update the application. +1. In your Aiven for Apache Flink service, select **Applications** from the left sidebar. +2. On the **Applications** landing page, click on the application name you want to rename. +3. In the application's overview page, click the **Application action menu (...)** , and click **Update application** from the menu options. +4. In the **Update Application** dialog, enter the new name for the application and select **Save changes** to confirm the new name and update the application. .. _flink-deployment-history: @@ -66,17 +77,18 @@ The **Deployment History** screen provides the following: To view and delete the deployment history of an application, follow these steps: -1. Select **Applications** from the left sidebar on your Aiven for Apache Flink service, and select the application for which you want to view the deployment history. -2. On the application landing screen, select **Deployment History** to view the deployment history. -3. To remove a specific deployment from the history, locate it in the deployment history screen and select the **Delete** icon next to it. +1. In your Aiven for Apache Flink service, select **Applications** from the left sidebar. +2. On the **Applications** landing page, click on the application name for which you want to view the deployment history. +3. In the application landing page, click **Deployment History** to view the deployment history. +4. To remove a specific deployment from the history, locate it in the deployment history page and click the **Delete** icon next to it. -To learn about the different application statuses, see Delete application ------------------- Before deleting an application, it is necessary to remove all associated :ref:`deployment history `. -1. Select **Applications** from the left sidebar on your Aiven for Apache Flink service, and select the application you want to delete. -2. On the application landing screen, select the **Application action menu** (ellipsis) located on the right side of the screen, and select **Delete application** from the menu options. -3. In the **Delete Confirmation** screen, enter the name of the application and select **Confirm** to proceed with the deletion. +1. In your Aiven for Apache Flink service, select **Applications** from the left sidebar. +2. On the **Applications** landing page, click on the application name you want to delete. +3. In the application's overview page, click the **Application action menu (...)**, and click **Delete application** from the menu options. +4. In the **Delete Confirmation** dialog, enter the name of the application and click **Confirm** to proceed with the deletion. 
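+
+Manage applications with the Aiven CLI
+----------------------------------------
+
+Most of these operations can also be scripted with the Aiven CLI. The snippet below is a hypothetical sketch only: the exact ``avn service flink`` subcommand names and flags depend on your aiven-client version, so verify them with ``avn service flink -h`` before use.
+
+.. code::
+
+   # Hypothetical subcommand names - confirm with: avn service flink -h
+   avn service flink list-applications YOUR_SERVICE_NAME --project YOUR_PROJECT_NAME
+   avn service flink get-application YOUR_SERVICE_NAME --project YOUR_PROJECT_NAME --application-id YOUR_APPLICATION_ID
+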
diff --git a/docs/products/flink/howto/slack-connector.rst b/docs/products/flink/howto/slack-connector.rst index 57c496ccf8..d07d759d4d 100644 --- a/docs/products/flink/howto/slack-connector.rst +++ b/docs/products/flink/howto/slack-connector.rst @@ -8,7 +8,7 @@ You can access the open-source Slack connect for Apache Flink on Aiven's GitHub Prerequisites ------------- -* Slack app created and ready for use. For more information, refer to the `Set-up Slack Application section `_ on the GitHub repository and the `Slack documentation `_. +* Slack app created and ready for use. For more information, refer to the `Set-up Slack Application section `_ on the GitHub repository and the `Slack documentation `_. * Note the **channel ID** and **token value**, as these will be required in the sink connector Table SQL when configuring the connection in your Flink application. diff --git a/docs/products/grafana/concepts/grafana-features.rst b/docs/products/grafana/concepts/grafana-features.rst index 1d944b10f8..14fc226600 100644 --- a/docs/products/grafana/concepts/grafana-features.rst +++ b/docs/products/grafana/concepts/grafana-features.rst @@ -25,11 +25,10 @@ You can also integrate with popular logs management solutions such as OpenSearch Monitoring with dashboards, plugins, and alerting ------------------------------------------------- -Dashboards and plugins - With Aiven for Grafana, you can monitor your data using ready-made dashboards. Benefit from over 60 advanced panel and :doc:`data source plugins ` that provide a high level of customization to your monitoring solution. -Monitoring and alerting - Aiven for Grafana allows you to create monitoring solutions for all teams, enabling everyone to keep track of critical metrics. Additionally, you can implement an observability platform with alerting, ensuring that you are promptly notified in case of any issues. +- **Dashboards and plugins:** With Aiven for Grafana, you can monitor your data using ready-made dashboards. Benefit from over 60 advanced panel and :doc:`data source plugins ` that provide a high level of customization to your monitoring solution. + +- **Monitoring and alerting:** Aiven for Grafana allows you to create monitoring solutions for all teams, enabling everyone to keep track of critical metrics. Additionally, you can implement an observability platform with alerting, ensuring that you are promptly notified in case of any issues. Automation -------------- diff --git a/docs/products/grafana/howto/dashboard-previews.rst b/docs/products/grafana/howto/dashboard-previews.rst index 3798a8e840..0a2870d4b5 100644 --- a/docs/products/grafana/howto/dashboard-previews.rst +++ b/docs/products/grafana/howto/dashboard-previews.rst @@ -13,22 +13,18 @@ Enable dashboard previews Follow these steps to enable dashboard previews for your Aiven for Grafana service: -1. Log in to the `Aiven Console `_. -2. On the **Services** page, click the Grafana service for which you want to enable dashboard previews. -3. On the **Services overview** page, scroll down to the **Advanced configuration** section. -4. Click the **Change** button. -5. In the **Edit advanced configuration** pop-up screen, turn the toggle on next to ``dashboad_previews_enabled`` to enable the feature. -6. Click the **Save advanced configuration** button. You will notice the status next to ``dashboad_previews_enabled`` change from ``not synced`` to ``synced``. - -.. 
image:: /images/products/grafana/enable-dashboard-previews.png - :alt: Enable dashboard previews in Advanced configuration - +1. In the `Aiven Console `_, select your project and then choose your Aiven for Grafana® service. +2. In the service page, select **Service settings** from the sidebar. +3. On the **Service settings** page, scroll down to the **Advanced configuration** section, and click **Configure**. +4. In the **Advanced configuration** dialog, click **Add configuration option**. +5. Find and set ``dashboad_previews_enabled`` to **Enabled** position. +6. Click the **Save configuration**. You will notice the status next to ``dashboad_previews_enabled`` change from ``not synced`` to ``synced``. 7. Using the **Service URI**, open the Grafana login page. 8. Enter the username and password, and click **Log in**. -9. Click **Dashboards** on the left side menu, and select the grid layout to view dashboard previews of all the dashboards. Dashboard previews are rendered as thumbnails and can be sorted alphabetically. +9. Click **Dashboards** on the left side menu, and select the grid layout to view dashboard previews of all the dashboards. Dashboard previews are rendered as thumbnails and can be sorted alphabetically. -.. image:: /images/products/grafana/dashboard-previews-on-grafana.png - :alt: Dashboard previews on Grafana + .. image:: /images/products/grafana/dashboard-previews-on-grafana.png + :alt: Dashboard previews on Grafana Limitations ----------- @@ -36,4 +32,4 @@ Limitations * Before downgrading your service plan to Hobbyist or Startup-1, where dashboard previews are unavailable, you need first to disable it on the current service. .. seealso:: - For more information on Dashboard previews, see `Grafana documentation `_. + For more information on Dashboard previews, see `Grafana documentation `_. diff --git a/docs/products/grafana/howto/pitr-process-for-grafana.rst b/docs/products/grafana/howto/pitr-process-for-grafana.rst index 0d80b1973d..1dc9c0d7e5 100644 --- a/docs/products/grafana/howto/pitr-process-for-grafana.rst +++ b/docs/products/grafana/howto/pitr-process-for-grafana.rst @@ -4,14 +4,11 @@ Point-in-time recovery (PITR) process for Aiven for Grafana® The Point-in-Time Recovery (PITR) process allows you to restore your Grafana service using a backup from a specific point in time. When you initiate the restore using the PITR backup for Grafana, a new service will be created to host the restored data. Follow the steps below to perform PITR for Aiven for Grafana: -1. In the Aiven for Grafana Service, select **Backups** from the left sidebar. +1. In the Aiven for Grafana Service, select **Backups** from the left sidebar. 2. Click **Fork & restore**. -.. image:: /images/products/grafana/grafana-pitr-fork-restore.png - :alt: click 'Fork & restore' from backup tab of Grafana service from Aiven console - -3. In the **New Database Fork** screen, +3. In the **New Database Fork** dialog, - Provide a name for the new service. - Verify that the appropriate Project name is chosen. @@ -22,12 +19,12 @@ The Point-in-Time Recovery (PITR) process allows you to restore your Grafana ser - Choose your cloud provider, preferred cloud region, and the service plan accordingly. -.. image:: /images/products/grafana/grafana-pitr-new-db-fork-popup.png - :alt: popup for setting specifics of the service restore. + .. image:: /images/products/grafana/grafana-pitr-new-db-fork-popup.png + :alt: popup for setting specifics of the service restore. 4. Click **Create fork** to create the new forked service. 
5. You will be redirected to the **Overview** page of the newly forked service. The service is in the **Rebuilding** status while it is being created. Once the service is ready, the status changes to **Running**. -.. image:: /images/products/grafana/grafana-pitr-after-fork.png - :alt: restore is rebuilding after clicking 'create fork' + .. image:: /images/products/grafana/grafana-pitr-after-fork.png + :alt: restore is rebuilding after clicking 'create fork' diff --git a/docs/products/grafana/howto/replace-expression-string.rst b/docs/products/grafana/howto/replace-expression-string.rst index 36b989207c..b4bf5a1393 100644 --- a/docs/products/grafana/howto/replace-expression-string.rst +++ b/docs/products/grafana/howto/replace-expression-string.rst @@ -48,7 +48,7 @@ To get your API key (``GRAFANA_API_KEY``): * Otherwise, select **Add API key** and fill in the *Key name*, *Role* and *Time to live*. Click **Add** and then save the new API key. - .. tip:: *Role* must be either *Editor* or *Admin*. + .. tip:: *Role* must be either *Editor* or *Admin*. To get the Grafana dashboard URL and UID ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/products/grafana/howto/send-emails.rst b/docs/products/grafana/howto/send-emails.rst index 21336f3e3b..0a9b8ff32f 100644 --- a/docs/products/grafana/howto/send-emails.rst +++ b/docs/products/grafana/howto/send-emails.rst @@ -34,18 +34,22 @@ To configure the Aiven for Grafana service: avn user login --token -2. configure the service using your own SMTP values:: +2. configure the service using your own SMTP values: - avn service update --project yourprojectname yourservicename \ - -c smtp_server.host=smtp.example.com \ - -c smtp_server.port=465 \ - -c smtp_server.username=emailsenderuser \ - -c smtp_server.password=emailsenderpass \ - -c smtp_server.from_address="grafana@yourcompany.com" - -3. (optionally) Review all available custom options, and configure as needed:: - - avn service types -v + .. code:: + + avn service update --project yourprojectname yourservicename \ + -c smtp_server.host=smtp.example.com \ + -c smtp_server.port=465 \ + -c smtp_server.username=emailsenderuser \ + -c smtp_server.password=emailsenderpass \ + -c smtp_server.from_address="grafana@yourcompany.com" + +3. (optionally) Review all available custom options, and configure as needed: + + .. code:: + + avn service types -v You have now set up your Aiven for Grafana to send emails. diff --git a/docs/products/grafana/reference/plugins.rst b/docs/products/grafana/reference/plugins.rst index 2e3360d1b6..c1e27ec5fd 100644 --- a/docs/products/grafana/reference/plugins.rst +++ b/docs/products/grafana/reference/plugins.rst @@ -5,210 +5,81 @@ The following plugins are available in all installations of Aiven for Grafana®, Panel plugins ------------- -AJAX - `Grafana `__ | `GitHub `__ - A general way to load external content into a Grafana dashboard. -Alert list - `Grafana `__ | `Grafana Docs `__ - Allows you to display alerts on a dashboard. The list can be configured to show either the current state of your alerts or recent alert state changes. - -Annotation list - `Grafana `__ | `GitHub `__ - Shows user annotations in Grafana database. - -Bar chart - `Grafana Docs `__ - Allows you to graph categorical data. - -Bar gauge - `Grafana Docs `__ - Simplifies your data by reducing every field to a single value. You choose how Grafana calculates the reduction. This panel can show one or more bar gauges depending on how many series, rows, or columns your query returns. 
- -Candlestick - `Grafana Docs `__ - Shows a chart that is typically used to describe price movements of a security, derivative, or currency. - -Canvas - `Grafana `__ - Included with Grafana. Canvas visualizations are extensible form-built panels that allow you to explicitly place elements within static and dynamic layouts. Similar to UI and web design tools you may have used before. - -Carpet plot - `Grafana `__ - Receives data series and divides all the data into individual buckets. - -Clock - `Grafana `__ | `GitHub `__ - Shows the current time or a countdown and updates every second. - -D3 Gauge - `Grafana `__ | `GitHub `__ - Provides a D3-based gauge panel for Grafana 6.x/7.x - -Dashboard list - `Grafana `__ | `Grafana Docs `__ - Allows you to display dynamic links to other dashboards. The list can be configured to use starred dashboards, a search query and/or dashboard tags. - -Diagram - `Grafana `__ | `GitHub `__ - Provides a way to create flow-charts, sequence diagrams, and Gantt charts by leveraging the mermaid.js library. - -Discrete - `Grafana `__ | `GitHub `__ - Shows discrete values in a horizontal graph, plotting state transitions. - -FlowCharting - `Grafana `__ | `Docs `__ - Displays complex diagrams using the online graphing library draw.io. - -Gauge - Standard gauge visualization. - -Geomap - Included with Grafana. - -Getting Started - Included with Grafana. - -Graph - `Grafana Docs `__ - Included with Grafana, provides a very rich set of graphing options. - -Heatmap - `Grafana `__ | `Grafana Docs `__ - Allows you to view histograms over time. The legacy Heatmap plugin is also still available. - -Histogram - `Grafana `__ | `GitHub `__ - Provides a histogram for time series data. - -Logs - Included with Grafana. - -News - `Grafana Docs `__ - This panel visualization displays an RSS feed. By default, it displays articles from the Grafana Labs blog. - -Node Graph - Included with Grafana. - -Panel / Plotly - `GitHub `__ - Render metrics using the plot.ly JavaScript framework. - -Pie Chart - `Grafana `__ | `GitHub `__ - Adding pie charts to your dashboard. - -Plugin list - `Grafana `__ - Included with Grafana. - -Singlestat Math - `Grafana `__ - A modification of the native single stat panel to support math functions across series. - -Stat - `Grafana Docs `__ - Included with Grafana, allows you to show the one main summary stat of a SINGLE series. - -State timeline - Included with Grafana. - -Status history - Provides a periodic status history. - -Status Panel - `Grafana `__ | `GitHub `__ - Use as a centralized view for the status of component in a glance. - -Statusmap - `Grafana `__ | `GitHub `__ - Shows discrete statuses of multiple targets over time. - -Table - `Grafana `__ - Supports both multiple modes for time series as well as for table, annotation and raw JSON data. It also provides date formatting and value formatting and coloring options. - -Text - `Grafana `__ - A simple panel that displays text. The source text is written in markdown so that you can format it. - -Time series - Allows for visualizing time series data. - -Welcome - Included with Grafana. - -Worldmap Panel - `GitHub `__ - Displays time series data or geohash data from Elasticsearch overlaid on a world map. - -XY Chart - `Grafana `__ - Native plugin that supports arbitrary X vs Y in graphs. +- AJAX - `Grafana `__, `GitHub `__: A general way to load external content into a Grafana dashboard. +- Alert list - `Grafana `__, `Grafana Docs `__: Allows you to display alerts on a dashboard. 
The list can be configured to show either the current state of your alerts or recent alert state changes. +- Annotation list - `Grafana `__, `GitHub `__: Shows user annotations in Grafana database. +- Bar chart - `Grafana Docs `__: Allows you to graph categorical data. +- Bar gauge - `Grafana Docs `__: Simplifies your data by reducing every field to a single value. You choose how Grafana calculates the reduction. This panel can show one or more bar gauges depending on how many series, rows, or columns your query returns. +- Candlestick - `Grafana Docs `__: Shows a chart that is typically used to describe price movements of a security, derivative, or currency. +- Canvas - `Grafana `__: Included with Grafana. Canvas visualizations are extensible form-built panels that allow you to explicitly place elements within static and dynamic layouts. Similar to UI and web design tools you may have used before. +- Carpet plot - `Grafana `__: Receives data series and divides all the data into individual buckets. +- Clock - `Grafana `__, `GitHub `__: Shows the current time or a countdown and updates every second. +- D3 Gauge - `Grafana `__, `GitHub `__: Provides a D3-based gauge panel for Grafana 6.x/7.x +- Dashboard list - `Grafana `__, `Grafana Docs `__: Allows you to display dynamic links to other dashboards. The list can be configured to use starred dashboards, a search query and/or dashboard tags. +- Diagram - `Grafana `__, `GitHub `__: Provides a way to create flow-charts, sequence diagrams, and Gantt charts by leveraging the mermaid.js library. +- Discrete - `Grafana `__, `GitHub `__: Shows discrete values in a horizontal graph, plotting state transitions. +- FlowCharting - `Grafana `__, `Docs `__: Displays complex diagrams using the online graphing library draw.io. +- Gauge: Standard gauge visualization. +- Geomap: Included with Grafana. +- Getting Started : Included with Grafana. +- Graph - `Grafana Docs `__: Included with Grafana, provides a very rich set of graphing options. +- Heatmap - `Grafana `__, `Grafana Docs `__: Allows you to view histograms over time. The legacy Heatmap plugin is also still available. +- Histogram - `Grafana `__, `GitHub `__: Provides a histogram for time series data. +- Logs: Included with Grafana. +- News - `Grafana Docs `__: This panel visualization displays an RSS feed. By default, it displays articles from the Grafana Labs blog. +- Node Graph: Included with Grafana. +- Panel / Plotly - `GitHub `__: Render metrics using the plot.ly JavaScript framework. +- Pie Chart - `Grafana `__, `GitHub `__: Adding pie charts to your dashboard. +- Plugin list - `Grafana `__: Included with Grafana. +- Singlestat Math - `Grafana `__: A modification of the native single stat panel to support math functions across series. +- Stat - `Grafana Docs `__: Included with Grafana, allows you to show the one main summary stat of a SINGLE series. +- State timeline: Included with Grafana. +- Status history: Provides a periodic status history. +- Status Panel - `Grafana `__, `GitHub `__: Use as a centralized view for the status of component in a glance. +- Statusmap - `Grafana `__, `GitHub `__: Shows discrete statuses of multiple targets over time. +- Table - `Grafana `__: Supports both multiple modes for time series as well as for table, annotation and raw JSON data. It also provides date formatting and value formatting and coloring options. +- Text - `Grafana `__: A simple panel that displays text. The source text is written in markdown so that you can format it. 
+- Time series: Allows for visualizing time series data. +- Welcome: Included with Grafana. +- Worldmap Panel - `GitHub `__: Displays time series data or geohash data from Elasticsearch overlaid on a world map. +- XY Chart - `Grafana `__: Native plugin that supports arbitrary X vs Y in graphs. Data source plugins ------------------- -Altinity plugin for ClickHouse® - `GitHub `_ - Provides support for ClickHouse® as a backend database. - -Azure Monitor - `Grafana `__ | `GitHub `__ - Provides a single source for monitoring Azure resources. - -CloudWatch - `Grafana `__ | `Grafana Docs `__ - Build dashboards for your CloudWatch metrics. - -Elasticsearch - `Grafana `__ | `Grafana Docs `__ - Performs Elasticsearch queries to visualize logs or metrics stored in Elasticsearch. Annotate your graphs with log events stored in Elasticsearch. - -GitHub - `GitHub `__ - Allows GitHub API data to be visually represented in Grafana dashboards. - -Google BigQuery - `GitHub `__ - Provides support for BigQuery as a backend database. - -Google Sheets - `Grafana `__ | `GitHub `__ - Visualize your Google Spreadsheets in Grafana. - -Graphite - `Grafana `__ | `Grafana Docs `__ - Quickly navigate the metric space, add functions, change function parameters and more. - -InfluxDB® - `Grafana `__ | `Grafana Docs `__ - -Instana - `Grafana `__ | `GitHub `__ - Shows metrics from Instana AI-Powered APM for dynamic applications. - -Jaeger - Open source, end-to-end distributed tracing. - -Loki - A built in data source that allows you to connect to the Loki logging service. - -Microsoft SQL Server - Grafana ships with a built-in Microsoft SQL Server (MSSQL) data source plugin that allows you to query and visualize data from any Microsoft SQL Server 2005 or newer. - -MySQL - `Grafana `__ | `Grafana Docs `__ - Allows you to query any visualize data from a MySQL compatible database. - -OpenSearch® - `Grafana `__ - Runs many types of simple or complex OpenSearch queries to visualize logs or metrics stored in OpenSearch. Annotate your graphs with log events stored in OpenSearch. - -OpenTSDB - `Grafana `__ | `Grafana Docs `__ - OpenTSDB is a scalable, distributed time series database. - -Pagerduty - `Grafana `__ | `GitHub `__ - Annotations-only datasource for Pagerduty events. - -PostgreSQL® - `Grafana `__ | `Grafana Docs `__ - Allows you to query and visualize data from a PostgreSQL compatible database. - -Prometheus - `Grafana `__ | `Grafana Docs `__ - Work with the open-source service monitoring system and time series database. - -Prometheus AlertManager - `GitHub `__ - Allows you to use the AlertManager API of Prometheus to create dashboards in Grafana. - -SimpleJson - `Grafana `__ | `GitHub `__ - -Stackdriver / Google Cloud Monitoring - Data source for Google's monitoring service (formerly named, and listed as, Stackdriver). - -Tempo - High volume, minimal dependency trace storage. OSS tracing solution from Grafana Labs. - -TestData DB - Generates test data in different forms. - -Zipkin - Data source for the distributed tracing system. +- Altinity plugin for ClickHouse® - `GitHub `_: Provides support for ClickHouse® as a backend database. +- Azure Monitor - `Grafana `__, `GitHub `__: Provides a single source for monitoring Azure resources. +- CloudWatch - `Grafana `__, `Grafana Docs `__: Build dashboards for your CloudWatch metrics. +- Elasticsearch - `Grafana `__, `Grafana Docs `__: Performs Elasticsearch queries to visualize logs or metrics stored in Elasticsearch. 
Annotate your graphs with log events stored in Elasticsearch. +- GitHub - `GitHub `__: Allows GitHub API data to be visually represented in Grafana dashboards. +- Google BigQuery - `GitHub `__: Provides support for BigQuery as a backend database. +- Google Sheets - `Grafana `__, `GitHub `__: Visualize your Google Spreadsheets in Grafana. +- Graphite - `Grafana `__, `Grafana Docs `__: Quickly navigate the metric space, add functions, change function parameters and more. +- InfluxDB® - `Grafana `__, `Grafana Docs `__ +- Instana - `Grafana `__, `GitHub `__: Shows metrics from Instana AI-Powered APM for dynamic applications. +- Jaeger: Open source, end-to-end distributed tracing. +- Loki: A built in data source that allows you to connect to the Loki logging service. +- Microsoft SQL Server: Grafana ships with a built-in Microsoft SQL Server (MSSQL) data source plugin that allows you to query and visualize data from any Microsoft SQL Server 2005 or newer. +- MySQL - `Grafana `__, `Grafana Docs `__: Allows you to query any visualize data from a MySQL compatible database. +- OpenSearch® - `Grafana `__: Runs many types of simple or complex OpenSearch queries to visualize logs or metrics stored in OpenSearch. Annotate your graphs with log events stored in OpenSearch. +- OpenTSDB - `Grafana `__, `Grafana Docs `__: OpenTSDB is a scalable, distributed time series database. +- Pagerduty - `Grafana `__, `GitHub `__: Annotations-only datasource for Pagerduty events. +- PostgreSQL® - `Grafana `__, `Grafana Docs `__: Allows you to query and visualize data from a PostgreSQL compatible database. +- Prometheus - `Grafana `__, `Grafana Docs `__: Work with the open-source service monitoring system and time series database. +- Prometheus AlertManager - `GitHub `__: Allows you to use the AlertManager API of Prometheus to create dashboards in Grafana. +- SimpleJson - `Grafana `__, `GitHub `__ +- Stackdriver / Google Cloud Monitoring: Data source for Google's monitoring service (formerly named, and listed as, Stackdriver). +- Tempo: High volume, minimal dependency trace storage. OSS tracing solution from Grafana Labs. +- TestData DB: Generates test data in different forms. +- Zipkin: Data source for the distributed tracing system. Other ----- -Grafana Image Renderer (Renderer) - `Grafana `__ | `GitHub `__ - Handles rendering panels and dashboards to PNGs using a headless browser (Chromium). - -Traces (Application) - `Grafana `__ - Grafana Enterprise Traces (GET) is a commercial offering based on Tempo, and allows you to deploy a highly-scalable, simple, and reliable traces cluster in your own data center. - -worldPing - `GitHub `__ - Continually tests, stores and alerts on the global performance and availability of your Internet applications so you can pinpoint issues. - -Zabbix (Application) - `Grafana `__ | `GitHub `__ - Visualizes your Zabbix metrics. ------- +- Grafana Image Renderer (Renderer) - `Grafana `__, `GitHub `__: Handles rendering panels and dashboards to PNGs using a headless browser (Chromium). +- Traces (Application) - `Grafana `__: Grafana Enterprise Traces (GET) is a commercial offering based on Tempo, and allows you to deploy a highly-scalable, simple, and reliable traces cluster in your own data center. +- worldPing - `GitHub `__: Continually tests, stores and alerts on the global performance and availability of your Internet applications so you can pinpoint issues. +- Zabbix (Application) - `Grafana `__, `GitHub `__: Visualizes your Zabbix metrics. 
*Elasticsearch is a trademark of Elasticsearch B.V., registered in the U.S. and in other countries.* diff --git a/docs/products/influxdb.rst b/docs/products/influxdb.rst index ee4643e914..f46c114fa2 100644 --- a/docs/products/influxdb.rst +++ b/docs/products/influxdb.rst @@ -1,16 +1,9 @@ Aiven for InfluxDB® =================== -What is Aiven for InfluxDB®? ----------------------------- - Aiven for InfluxDB® is a fully managed high-performance time series database designed for highly variable data, deployable in the cloud of your choice. It can ingest and query hundreds of thousands of data points a second. - -Why InfluxDB®? --------------- - -InfluxDB is a great time series data solution. It is known for its ability to handle variable data sets and query past data. And with Aiven, you can deploy InfluxDB in minutes and enhance your existing architecture. +With Aiven, you can deploy InfluxDB in minutes and enhance your existing architecture. Get started with Aiven for InfluxDB® ------------------------------------- diff --git a/docs/products/kafka.rst b/docs/products/kafka.rst index b5c581a86c..c634a3f5ff 100644 --- a/docs/products/kafka.rst +++ b/docs/products/kafka.rst @@ -1,19 +1,10 @@ Aiven for Apache Kafka® ======================= -What is Aiven for Apache Kafka®? --------------------------------- - Aiven for Apache Kafka® is a fully managed **distributed data streaming platform**, deployable in the cloud of your choice. Apache Kafka is an open source data streaming platform, ideal for event-driven applications, near-real-time data transfer and pipelines, stream analytics, and many more applications where a lot of data needs to move between applications in a speedy manner. Kafka stores a potentially large number of records, each contains a small amount of data, usually for a limited period of time. The storage is organised into "topics" and "partitions" so that many data streams can be handled at once, regardless of how much data is flowing into or out of your Aiven for Apache Kafka service. - -Why Apache Kafka? ------------------ - -Apache Kafka itself is technically a distributed log storage mechanism; in reality it is a best-in-class, highly-available data streaming solution. Oh, and it just happens to have an incredibly rich ecosystem of open source tooling that connects to and extends the existing platform. - Aiven for Apache Kafka® MirrorMaker 2 ''''''''''''''''''''''''''''''''''''' @@ -27,7 +18,7 @@ Apache Kafka moves data between systems, and Apache Kafka Connect is how to inte Get started with Aiven for Apache Kafka --------------------------------------- -Take your first steps with Aiven for Apache Kafka by following our :doc:`/docs/products/kafka/getting-started` article, or browse through our full list of articles: +Take your first steps with Aiven for Apache Kafka by following our :doc:`/docs/products/kafka/get-started` article, or browse through our full list of articles: .. grid:: 1 2 2 2 diff --git a/docs/products/kafka/concepts.rst b/docs/products/kafka/concepts.rst index 3ba38b4492..5e26432225 100644 --- a/docs/products/kafka/concepts.rst +++ b/docs/products/kafka/concepts.rst @@ -1,6 +1,83 @@ -Concepts -======== +Apache Kafka® concepts +====================== -Explanations and supporting concepts to help you succeed with Aiven for Apache Kafka®. +A comprehensive glossary of essential Apache Kafka® terms and their meanings. -.. tableofcontents:: +.. 
_Broker: + +Broker +------ + +A server that operates Apache Kafka, responsible for message storage, processing, and delivery. Typically part of a cluster for enhanced scalability and reliability, each broker functions independently but is integral to Kafka's overall operations, separate from tools like Apache Kafka Connect. + +Consumer +-------- + +An application that reads data from Apache Kafka, often processing or acting upon it. Various tools used with Apache Kafka ultimately function as either a producer or a consumer when communicating with Apache Kafka. + +Consumer groups +--------------- + +Groups of consumers in Apache Kafka are used to scale beyond a single application instance. Multiple instances of an application coordinate to handle messages, with each group allocated to different partitions for even workload distribution. + +Event-driven architecture +------------------------- + +Application architecture centered around responding to and processing events. + +.. _Event: + +Event +----- + +A single discrete data unit in Apache Kafka, consisting of a ``value`` (the message body) and often a ``key`` (for quick identification) and ``headers`` (metadata about the message). + +Kafka node +---------- + +See :ref:`Broker` + +Kafka server +------------ + +See :ref:`Broker` + +Message +------- + +See :ref:`Event` + +Partitioning +------------ + +A method used by Apache Kafka to distribute a topic across multiple servers. Each server acts as the ``leader`` for a partition, ensuring data sharding and message order within each partition. + +Producer +-------- + +An application that writes data into Apache Kafka without concern for the data's consumers. The data can range from well-structured to simple text, often accompanied by metadata. + +Pub/sub +------- + +A publish-subscribe messaging architecture where messages are broadcasted by publishers and received by any listening subscribers, unlike point-to-point systems. + +Queueing +-------- + +A messaging system where messages are sent and received in the order they are produced. Apache Kafka maintains a watermark for each consumer to track the most recent message read. + +Record +------ + +See :ref:`Event` + +Replication +----------- + +Apache Kafka's feature for data replication across multiple servers, ensuring data preservation even if a server fails. This is configurable per topic. + +Topic +----- + +Logical channels in Apache Kafka through which messages are organized. Topics are named in a human-readable manner, like ``sensor-readings`` or ``kubernetes-logs``. diff --git a/docs/products/kafka/concepts/acl.rst b/docs/products/kafka/concepts/acl.rst index b94f78614b..352e38787f 100644 --- a/docs/products/kafka/concepts/acl.rst +++ b/docs/products/kafka/concepts/acl.rst @@ -1,7 +1,7 @@ Access control lists and permission mapping ============================================ -Aiven for Apache Kafka® uses **access control lists** (ACL) and user definitions to establish individual rights to produce, consume or manage topics. To manage users and ACL entries, you can access the corresponding options in the left-side navigation menu on the service page within the `Aiven Console `_. For detailed instructions, see :doc:`Manage users and access control lists <../howto/manage-acls>`. +Aiven for Apache Kafka® uses **access control lists** (ACL) and user definitions to establish individual rights to produce, consume or manage topics. 
To manage users and ACL entries, you can access the corresponding options in the left-side navigation menu on the service page within the `Aiven Console `_. For detailed instructions, see :doc:`Manage users and access control lists `. ACL structure ------------- diff --git a/docs/products/kafka/concepts/auth-types.rst b/docs/products/kafka/concepts/auth-types.rst index 4547f65cb3..d677bc984f 100644 --- a/docs/products/kafka/concepts/auth-types.rst +++ b/docs/products/kafka/concepts/auth-types.rst @@ -67,7 +67,7 @@ This identity is then by default stored in Apache ZooKeeper™. Enable SASL authentication --------------------------- -Follow the steps from our article :doc:`to enable SASL authentication <../howto/kafka-sasl-auth>` +Follow the steps from our article :doc:`to enable SASL authentication ` ----- diff --git a/docs/products/kafka/concepts/configuration-backup.rst b/docs/products/kafka/concepts/configuration-backup.rst index 547713d10c..4cc5038a61 100644 --- a/docs/products/kafka/concepts/configuration-backup.rst +++ b/docs/products/kafka/concepts/configuration-backup.rst @@ -23,8 +23,8 @@ Some of the key benefits of configuration backups include the following: * Configurations are backed up in 3 hours intervals. * It helps with speedy disaster recovery in certain situations. -.. note:: - In a rare scenario where all the nodes are lost, the configurations are lost and not accessible anymore. + .. note:: + In a rare scenario where all the nodes are lost, the configurations are lost and not accessible anymore. * It helps application users with a quick re-creation of Apache Kafka® services, allowing them to focus on development tasks rather than re-configuring the platform. diff --git a/docs/products/kafka/concepts/consumer-lag-predictor.rst b/docs/products/kafka/concepts/consumer-lag-predictor.rst index e9b4cfac5f..7c2fe201b9 100644 --- a/docs/products/kafka/concepts/consumer-lag-predictor.rst +++ b/docs/products/kafka/concepts/consumer-lag-predictor.rst @@ -4,9 +4,10 @@ Consumer lag predictor for Aiven for Apache Kafka® The **consumer lag predictor** for Aiven for Apache Kafka estimates the delay between the time a message is produced and when it's eventually consumed by a consumer group. This information can be used to improve the performance, scalability, and cost-effectiveness of your Kafka cluster. .. important:: - Consumer Lag Predictor for Aiven for Apache Kafka® is a limited availability feature. If you're interested in trying out this feature, contact the sales team at sales@Aiven.io. -To use the **consumer lag predictor** effectively, setting up :doc:`Prometheus integration ` with your Aiven for Apache Kafka® service is essential. Prometheus integration enables the extraction of key metrics necessary for lag prediction and monitoring. + Consumer Lag Predictor for Aiven for Apache Kafka® is a limited availability feature. If you're interested in trying out this feature, contact the sales team at sales@Aiven.io. + +To use the **consumer lag predictor** effectively, set up :doc:`Prometheus integration ` with your Aiven for Apache Kafka® service. The Prometheus integration enables the extraction of key metrics necessary for lag prediction and monitoring. Why use consumer lag predictor? 
--------------------------------- diff --git a/docs/products/kafka/concepts/kafka-quotas.rst b/docs/products/kafka/concepts/kafka-quotas.rst index 16bafcbce7..8297e60cb0 100644 --- a/docs/products/kafka/concepts/kafka-quotas.rst +++ b/docs/products/kafka/concepts/kafka-quotas.rst @@ -1,7 +1,7 @@ Quotas in Aiven for Apache Kafka® ==================================== -Quotas ensure fair resource allocation, stability, and efficiency in your Kafka cluster. In Aiven for Apache Kafka®, you can :doc:`add quotas <../howto/manage-quotas>` to limit the data or requests exchanged by producers and consumers within a specific period, preventing issues like broker overload, network congestion, and service disruptions caused by excessive or malicious traffic. You can effectively manage resource consumption and ensure optimal user performance by implementing quotas. You can add and manage quotas using `Aiven Console `_ and `Aiven API `_. +Quotas ensure fair resource allocation, stability, and efficiency in your Kafka cluster. In Aiven for Apache Kafka®, you can :doc:`add quotas ` to limit the data or requests exchanged by producers and consumers within a specific period, preventing issues like broker overload, network congestion, and service disruptions caused by excessive or malicious traffic. You can effectively manage resource consumption and ensure optimal user performance by implementing quotas. You can add and manage quotas using `Aiven Console `_ and `Aiven API `_. Using quotas offer several benefits: @@ -25,11 +25,11 @@ Client ID and users in quotas -------------------------------- **Client ID** and **User** are two types of entities that can be used to enforce quotas in Kafka. -**Client ID** - A Client ID is a unique identifier assigned to each client application or producer/consumer instance that connects to a Kafka cluster. It helps track the activity and resource usage of individual clients. When configuring quotas, you can set limits based on the Client ID, allowing you to control the amount of resources (such as network bandwidth or CPU) a specific client can utilize. +**Client ID:** +A Client ID is a unique identifier assigned to each client application or producer/consumer instance that connects to a Kafka cluster. It helps track the activity and resource usage of individual clients. When configuring quotas, you can set limits based on the Client ID, allowing you to control the amount of resources (such as network bandwidth or CPU) a specific client can utilize. -**Users** - A User represents the authenticated identity of a client connecting to a cluster. With authentication mechanisms like SASL, users are associated with specific connections. By setting quotas based on Users, resource limits can be enforced per-user. +**Users:** +A User represents the authenticated identity of a client connecting to a cluster. With authentication mechanisms like SASL, users are associated with specific connections. By setting quotas based on Users, resource limits can be enforced per-user. Quotas enforcement ------------------- @@ -39,10 +39,7 @@ When a client exceeds its quota, the broker calculates the necessary delay to br Quota violations are swiftly detected using short measurement windows, typically 30 windows of 1 second each. This ensures timely correction and prevents bursts of traffic followed by long delays, providing a better user experience. -For more information, refer to `Enforcement `_ in the Apache Kafka® official documentation. - -.. 
seealso:: - * :doc:`How to add and manage quotas <../howto/manage-quotas>` +For more information, see `Enforcement `_ in the Apache Kafka® official documentation and :doc:`How to add and manage quotas ` Further reading diff --git a/docs/products/kafka/concepts/kafka-tiered-storage.rst b/docs/products/kafka/concepts/kafka-tiered-storage.rst index befb5fecd8..9c2b5474e6 100644 --- a/docs/products/kafka/concepts/kafka-tiered-storage.rst +++ b/docs/products/kafka/concepts/kafka-tiered-storage.rst @@ -5,11 +5,13 @@ Tiered storage in Aiven for Apache Kafka® enables more effective data managemen .. important:: - Aiven for Apache Kafka® tiered storage is a :doc:`limited availability feature `. If you're interested in trying out this feature, contact the sales team at sales@Aiven.io. + Aiven for Apache Kafka® tiered storage is an :doc:`early availability feature `. If you're interested in trying out this feature, contact the sales team at sales@Aiven.io. .. note:: + - Tiered storage for Aiven for Apache Kafka® is supported starting from Apache Kafka® version 3.6. + - Tiered storage for Aiven for Apache Kafka® is not available for startup-2 plans. @@ -41,7 +43,7 @@ Pricing Tiered storage costs are determined by the amount of remote storage used, measured in GB/hour. The highest usage level within each hour is the basis for calculating charges. -Related reading +Related pages ---------------- * :doc:`How tiered storage works in Aiven for Apache Kafka® ` diff --git a/docs/products/kafka/concepts/log-compaction.rst b/docs/products/kafka/concepts/log-compaction.rst index 19538e7e51..6b6a21ab52 100644 --- a/docs/products/kafka/concepts/log-compaction.rst +++ b/docs/products/kafka/concepts/log-compaction.rst @@ -3,7 +3,7 @@ Compacted topics One way to reduce the disk space requirements in Apache Kafka® is to use **compacted topics**. This methodology retains only the newest record for each key on a topic, regardless of whether the retention period of the message has expired or not. Depending on the application, this can significantly reduce the amount of storage required for the topic. -To make use of log compaction, all messages sent to the topic must have an explicit key. To enable log compaction, follow the steps described in :doc:`how to configure log cleaner <../howto/configure-log-cleaner>`. +To make use of log compaction, all messages sent to the topic must have an explicit key. To enable log compaction, follow the steps described in :doc:`how to configure log cleaner `. How compacted topics work diff --git a/docs/products/kafka/concepts/monitor-consumer-group.rst b/docs/products/kafka/concepts/monitor-consumer-group.rst index 0d4d0c3f34..eb1f719aed 100644 --- a/docs/products/kafka/concepts/monitor-consumer-group.rst +++ b/docs/products/kafka/concepts/monitor-consumer-group.rst @@ -8,7 +8,7 @@ This section builds on the :doc:`service integrations ` allows you to authenticate the user, and control read or write access to the individual resources available in the Schema Registry. +The schema registry authorization feature when enabled in :doc:`Karapace schema registry ` allows you to authenticate the user, and control read or write access to the individual resources available in the Schema Registry. For information on schema registry authorization for Aiven for Apache Kafka® services, see :doc:`Karapace schema registry authorization `. 
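The compacted-topics change above notes that log compaction keeps only the newest record per key and is controlled by the ``cleanup.policy``/``log.cleanup.policy`` settings. As a minimal sketch, assuming the ``kafka-python`` client and placeholder connection details (hostname, port, certificate paths, topic name, partition and replica counts), a compacted topic can also be created through the Kafka Admin API:

.. code-block:: python

    from kafka.admin import KafkaAdminClient, NewTopic

    # Placeholder connection details; the certificate files are the ones
    # downloaded from the Aiven Console for this service.
    admin = KafkaAdminClient(
        bootstrap_servers="my-kafka-service.aivencloud.com:12345",
        security_protocol="SSL",
        ssl_cafile="ca.pem",
        ssl_certfile="service.cert",
        ssl_keyfile="service.key",
    )

    # Create a topic whose cleanup policy is "compact", so the log cleaner
    # retains only the newest record for each key.
    admin.create_topics([
        NewTopic(
            name="user-profiles",
            num_partitions=3,
            replication_factor=2,
            topic_configs={"cleanup.policy": "compact"},
        )
    ])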
diff --git a/docs/products/kafka/concepts/tiered-storage-guarantees.rst b/docs/products/kafka/concepts/tiered-storage-guarantees.rst index e8f21714b4..585695e459 100644 --- a/docs/products/kafka/concepts/tiered-storage-guarantees.rst +++ b/docs/products/kafka/concepts/tiered-storage-guarantees.rst @@ -17,7 +17,7 @@ Let's say you have a topic with a **total retention threshold** of **1000 GB** a * If the total size of the data exceeds 1000 GB, Apache Kafka will begin deleting the oldest data from remote storage. -Related reading +Related pages ---------------- * :doc:`Tiered storage in Aiven for Apache Kafka® overview ` diff --git a/docs/products/kafka/concepts/tiered-storage-how-it-works.rst b/docs/products/kafka/concepts/tiered-storage-how-it-works.rst index 2429cd9a96..7add4fc278 100644 --- a/docs/products/kafka/concepts/tiered-storage-how-it-works.rst +++ b/docs/products/kafka/concepts/tiered-storage-how-it-works.rst @@ -3,7 +3,7 @@ How tiered storage works in Aiven for Apache Kafka® .. important:: - Aiven for Apache Kafka® tiered storage is a :doc:`limited availability feature `. If you're interested in trying out this feature, contact the sales team at sales@Aiven.io. + Aiven for Apache Kafka® tiered storage is an :doc:`early availability feature `. If you're interested in trying out this feature, contact the sales team at sales@Aiven.io. Aiven for Apache Kafka® tiered storage is a feature that optimizes data management across two distinct storage tiers: @@ -50,12 +50,12 @@ Segments are encrypted with 256-bit AES encryption before being uploaded to the -Related reading +Related pages ---------------- * :doc:`Tiered storage in Aiven for Apache Kafka® overview ` * :doc:`Guarantees ` -* :doc:`Limitiations ` +* :doc:`Limitations ` * :doc:`Enabled tiered storage for Aiven for Apache Kafka® service ` diff --git a/docs/products/kafka/concepts/tiered-storage-limitations.rst b/docs/products/kafka/concepts/tiered-storage-limitations.rst index 198d6766c7..54fcab8a4a 100644 --- a/docs/products/kafka/concepts/tiered-storage-limitations.rst +++ b/docs/products/kafka/concepts/tiered-storage-limitations.rst @@ -12,7 +12,7 @@ Limitations * If you enable tiered storage on a service, you can't migrate the service to a different region or cloud, except for moving to a virtual cloud in the same region. For migration to a different region or cloud, contact `Aiven support `_. -Related reading +Related pages ---------------- * :doc:`Tiered storage in Aiven for Apache Kafka® overview ` diff --git a/docs/products/kafka/concepts/upgrade-procedure.rst b/docs/products/kafka/concepts/upgrade-procedure.rst index 6458210a0f..27bb1716c3 100644 --- a/docs/products/kafka/concepts/upgrade-procedure.rst +++ b/docs/products/kafka/concepts/upgrade-procedure.rst @@ -44,14 +44,15 @@ The following set of steps are executed during an upgrade procedure: 2. Once the new nodes are running, they join the Apache Kafka cluster -.. Note:: - The Apache Kafka cluster now contains a mix of old and new nodes + .. Note:: + + The Apache Kafka cluster now contains a mix of old and new nodes 3. The partition data and leadership is transferred to new nodes -.. mermaid:: + .. mermaid:: - flowchart TD; + flowchart TD; subgraph KafkaCluster subgraph Node1 @@ -87,23 +88,22 @@ The following set of steps are executed during an upgrade procedure: PartitionB2 -.-> PartitionNewB2 PartitionC2 -.-> PartitionNewC2 -.. Warning:: + .. Warning:: - This step is CPU intensive due to the additional data movement overhead. 
+ This step is CPU intensive due to the additional data movement overhead. 4. Once old nodes don't have any partition data, they are retired from the cluster. -.. Note:: - - Depending on the cluster size more new nodes are added (by default up to 6 nodes at a time are replaced) + .. Note:: + Depending on the cluster size more new nodes are added (by default up to 6 nodes at a time are replaced) 5. The process is completed once the last old node has been removed from the cluster -.. mermaid:: + .. mermaid:: - flowchart TD; + flowchart TD; subgraph KafkaCluster subgraph NewNode1 diff --git a/docs/products/kafka/getting-started.rst b/docs/products/kafka/get-started.rst similarity index 100% rename from docs/products/kafka/getting-started.rst rename to docs/products/kafka/get-started.rst diff --git a/docs/products/kafka/howto/add-missing-producer-consumer-metrics.rst b/docs/products/kafka/howto/add-missing-producer-consumer-metrics.rst index 5f19b9af76..4da1eda696 100644 --- a/docs/products/kafka/howto/add-missing-producer-consumer-metrics.rst +++ b/docs/products/kafka/howto/add-missing-producer-consumer-metrics.rst @@ -1,8 +1,6 @@ -Add ``kafka.producer.`` and ``kafka.consumer`` Datadog metrics -============================================================== +Add client-side Apache Kafka® producer and consumer Datadog metrics +=================================================================== When you enable the :doc:`Datadog integration ` in Aiven for Apache Kafka®, the service supports all of the broker-side metrics listed in the `Datadog Kafka integration documentation `_ and allows you to send additional :doc:`custom metrics `. -However, all metrics that have a prefix like ``kafka.producer.*`` or ``kafka.consumer.*`` are client-side metrics that should be collected from the producer or consumer, and sent to Datadog. - -The dedicated `Datadog documentation `_ (see "Missing producer and consumer metrics" chapter) provides a way to include the missing metrics natively for Java based producers and consumers or via `DogStatsD `_ for clients in other languages. +Additionally, you can collect client-side metrics directly from the producer or consumer and send them to Datadog. For guidance, refer to the *Missing producer and consumer metrics* section in the `Datadog documentation `_, which outlines the process for integrating missing metrics natively for Java-based producers and consumers. For clients using languages other than Java, incorporating these metrics can be achieved through `DogStatsD `_. diff --git a/docs/products/kafka/howto/best-practices.rst b/docs/products/kafka/howto/best-practices.rst index 723a8e4948..36eb57961b 100644 --- a/docs/products/kafka/howto/best-practices.rst +++ b/docs/products/kafka/howto/best-practices.rst @@ -26,7 +26,7 @@ with a low number that allows efficient data processing and increase it if needed. As a general rule of thumb, the recommendation is to have max 4000 -partitions per broker, and max 200 000 partitions per cluster (`source `_). +partitions per broker, and max 200 000 partitions per cluster (`source `_). .. note:: Ordering is guaranteed only per partition. If you require relative ordering of records, you need to put that subset of data into the same partition. 
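The best-practices hunk above ends by noting that ordering is guaranteed only per partition, so records that must stay in order need to share a key. As a minimal sketch, assuming the ``kafka-python`` client and placeholder values for the hostname, port, topic name, and certificate paths, a producer can pin related records to one partition by keying them:

.. code-block:: python

    from kafka import KafkaProducer

    # Placeholder connection details; replace with your service's host and port
    # and the certificate files downloaded from the Aiven Console.
    producer = KafkaProducer(
        bootstrap_servers="my-kafka-service.aivencloud.com:12345",
        security_protocol="SSL",
        ssl_cafile="ca.pem",
        ssl_certfile="service.cert",
        ssl_keyfile="service.key",
        key_serializer=lambda k: k.encode("utf-8"),
        value_serializer=lambda v: v.encode("utf-8"),
    )

    # Records that share a key are routed to the same partition, so their
    # relative order is preserved for consumers.
    for reading in ("21.3", "21.7", "22.1"):
        producer.send("sensor-readings", key="sensor-42", value=reading)

    producer.flush()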
diff --git a/docs/products/kafka/howto/change-retention-period.rst b/docs/products/kafka/howto/change-retention-period.rst index 022ae1b708..ae7e0c6012 100644 --- a/docs/products/kafka/howto/change-retention-period.rst +++ b/docs/products/kafka/howto/change-retention-period.rst @@ -10,7 +10,7 @@ For a single topic To change the retention period for a single topic, follow these steps: -#. Log in to `Aiven Console `_ and select your Aiven for Apache Kafka® service. +#. In the `Aiven Console `_, select your project and then choose your Aiven for Apache Kafka® service. #. Select **Topics** from the left sidebar. @@ -38,15 +38,16 @@ To change the retention period for a single topic, follow these steps: At a service level ~~~~~~~~~~~~~~~~~~~ -#. Log in to `Aiven Console `_ and select your Aiven for Apache Kafka® service. -#. On the **Overview** page, scroll down to **Advanced configuration** and select **Change**. -#. In the **Edit advanced configuration** screen, select **Add configuration option**. -#. You have two options to configure the retention period for Apache Kafka® logs. +#. In the `Aiven Console `_, select your project and then choose your Aiven for Apache Kafka® service. +#. In the service page, select **Service settings** from the sidebar. +#. On the **Service settings** page, scroll down to the **Advanced configuration** section, and click **Configure**. +#. In the **Advanced configuration** dialog, click **Add configuration options**. +#. You have two options to configure the retention period for Apache Kafka® logs: - * You can either select ``kafka.log_retention_hours`` or ``kafka.log_retention_ms`` and set the desired length of time for retention. - * Alternatively, if you prefer to limit the amount of data retained based on storage usage, you can specify the value for ``kafka.log_retention_bytes``. + * You can either find ``kafka.log_retention_hours`` or ``kafka.log_retention_ms`` and set the desired length of time for retention. + * Alternatively, if you prefer to limit the amount of data retained based on storage usage, you can specify the value for ``kafka.log_retention_bytes``. -#. Click on **Save advanced configuration**. +#. Click on **Save configuration**. Unlimited retention ~~~~~~~~~~~~~~~~~~~~~ @@ -55,7 +56,7 @@ We do not limit the maximum retention period in any way, and in order to disable .. Warning:: - Using high retention periods without monitoring the available storage space can cause your service to run out of disk space. These situations are not covered by our SLA. + Using high retention periods without monitoring the available storage space can cause your service to run out of disk space. These situations are not covered by our SLA. diff --git a/docs/products/kafka/howto/configure-log-cleaner.rst b/docs/products/kafka/howto/configure-log-cleaner.rst index a55420f8c0..249d55bea3 100644 --- a/docs/products/kafka/howto/configure-log-cleaner.rst +++ b/docs/products/kafka/howto/configure-log-cleaner.rst @@ -5,18 +5,20 @@ The log cleaner serves the purpose of preserving only the latest value associate Enable log compaction for all topics ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -#. Log in to `Aiven Console `_ and select your Aiven for Apache Kafka service. -#. On the **Overview** page, scroll down to **Advanced configuration** and select **Add configuration option**. +#. In the `Aiven Console `_, select your project and then choose your Aiven for Apache Kafka® service. +#. In the service page, select **Service settings** from the sidebar. +#. 
On the **Service settings** page, scroll down to the **Advanced configuration** section, and click **Configure**. +#. In the **Advanced configuration** dialog, click **Add configuration options**. #. Find ``log.cleanup.policy`` in the list and select it. #. Set the value to ``compact``. -#. Select **Save advanced configuration**. +#. Click **Save configuration**. .. warning:: This change will affect all topics in the cluster that do not have a configuration override in place. Enable log compaction for a specific topic ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -#. Log in to `Aiven Console `_ and select your Aiven for Apache Kafka service. +#. In the `Aiven Console `_, select your project and then choose your Aiven for Apache Kafka® service. #. Select **Topics** from the left sidebar. #. Select a topic you want to modify and select **Modify** in the context menu. #. From the drop-down options for the **Cleanup policy**, select the value ``compact``. diff --git a/docs/products/kafka/howto/configure-topic-tiered-storage.rst b/docs/products/kafka/howto/configure-topic-tiered-storage.rst index fd444e9851..7b98bad654 100644 --- a/docs/products/kafka/howto/configure-topic-tiered-storage.rst +++ b/docs/products/kafka/howto/configure-topic-tiered-storage.rst @@ -1,11 +1,13 @@ Enable and configure tiered storage for topics =========================================================================== -Aiven for Apache Kafka® offers flexibility in configuring tiered storage and setting retention policies. This guide will walk you through the process of configuring tiered storage for individual topic and configuring local retention policies. - .. important:: - Aiven for Apache Kafka® tiered storage is a :doc:`limited availability feature `. If you're interested in trying out this feature, contact the sales team at sales@Aiven.io. + Aiven for Apache Kafka® tiered storage is an :doc:`early availability feature `. If you're interested in trying out this feature, contact the sales team at sales@Aiven.io. + + +Aiven for Apache Kafka® allows you to easily configure tiered storage and set retention policies for individual topics. Learn how to configure tiered storage for individual topics and set local retention policies step by step. + Prerequisite ------------ @@ -50,6 +52,10 @@ For an existing topic 5. Select **Update** to save your changes and activate tiered storage. +(Optional) configure client-side parameter +------------------------------------------- +For optimal performance and reduced risk of broker interruptions when using tiered storage, it is recommended to update the client-side parameter ``fetch.max.wait.ms`` from its default value of 500ms to 5000ms. + Enable tiered storage for topics via Aiven CLI ------------------------------------------------ diff --git a/docs/products/kafka/howto/connect-with-go.rst b/docs/products/kafka/howto/connect-with-go.rst index 89f2e9a421..bb8eb8d700 100644 --- a/docs/products/kafka/howto/connect-with-go.rst +++ b/docs/products/kafka/howto/connect-with-go.rst @@ -32,7 +32,7 @@ Go to the *Overview* page of your Aiven for Apache Kafka service. #. Next to *CA Certificate*, click **Download** and save the ``ca.pem`` file #.
Note the *Password* required for the SASL, we'll need it for authentication -* Created the keystore ``client.keystore.p12`` and truststore ``client.truststore.jks`` by following :doc:`our article on configuring Java SSL to access Kafka <../howto/keystore-truststore>` +* Created the keystore ``client.keystore.p12`` and truststore ``client.truststore.jks`` by following :doc:`our article on configuring Java SSL to access Kafka ` .. Warning:: diff --git a/docs/products/kafka/howto/connect-with-java.rst b/docs/products/kafka/howto/connect-with-java.rst index 7b759cfbe3..817ba71186 100644 --- a/docs/products/kafka/howto/connect-with-java.rst +++ b/docs/products/kafka/howto/connect-with-java.rst @@ -33,7 +33,7 @@ Go to the *Overview* page of your Aiven for Apache Kafka service. #. Next to *CA Certificate*, click **Download** and save the ``ca.pem`` file #. Note the *Password* required for the SASL, we'll need it for authentication -* Created the keystore ``client.keystore.p12`` and truststore ``client.truststore.jks`` by following :doc:`our article on configuring Java SSL to access Kafka <../howto/keystore-truststore>` +* Created the keystore ``client.keystore.p12`` and truststore ``client.truststore.jks`` by following :doc:`our article on configuring Java SSL to access Kafka ` .. Warning:: diff --git a/docs/products/kafka/howto/connect-with-nodejs.rst b/docs/products/kafka/howto/connect-with-nodejs.rst index 91fb9becf4..fcba8242d8 100644 --- a/docs/products/kafka/howto/connect-with-nodejs.rst +++ b/docs/products/kafka/howto/connect-with-nodejs.rst @@ -7,29 +7,28 @@ Pre-requisites --------------- #. Install `node-rdkafka `_. Make sure that you have OpenSSL set up on your machine. +#. Go to the *Overview* page of your Aiven for Apache Kafka service and choose how to authenticate. -Go to the *Overview* page of your Aiven for Apache Kafka service. + * To connect with SSL authentication, in the *Connection information* section: -* If you are going to connect with SSL authentication: + #. If **Authentication Method** is shown, choose **Client Certificate** + #. Next to *Access Key*, click **Download** and save the ``service.key`` file. + #. Next to *Access Certificate*, click **Download** and save the ``service.cert`` file. + #. Next to *CA Certificate*, click **Download** and save the ``ca.pem`` file. - * In the *Connection information* section: + * To connect using SASL authentication: - #. If **Authentication Method** is shown, choose **Client Certificate** - #. Next to *Access Key*, click **Download** and save the ``service.key`` file. - #. Next to *Access Certificate*, click **Download** and save the ``service.cert`` file. - #. Next to *CA Certificate*, click **Download** and save the ``ca.pem`` file. + #. Follow the instructions at `Use SASL Authentication with Apache Kafka® `_ to enable SASL. -* If you are going to connect using SASL authentication: + #. In the *Connection Information* section - #. Follow the instructions at `Use SASL Authentication with Apache Kafka® `_ to enable SASL. + #. Select **SASL** as the **Authentication Method** + #. Next to *CA Certificate*, click **Download** and save the ``ca.pem`` file + #. Note the *Password* required for the SASL, we'll need it for authentication - #. In the *Connection Information* section - - #. Select **SASL** as the **Authentication Method** - #. Next to *CA Certificate*, click **Download** and save the ``ca.pem`` file - #. 
Note the *Password* required for the SASL, we'll need it for authentication - -Note that the *CA Certificate* ``ca.pem`` file has the same contents by either route. +.. note:: + + The *CA Certificate* ``ca.pem`` file has the same content regardless of the authentication method. .. Warning:: diff --git a/docs/products/kafka/howto/create-topic.rst b/docs/products/kafka/howto/create-topic.rst index 1efa788a17..acd3027c87 100644 --- a/docs/products/kafka/howto/create-topic.rst +++ b/docs/products/kafka/howto/create-topic.rst @@ -14,10 +14,10 @@ To create a new topic using the `Aiven Console `_, fo 1. Log in to `Aiven Console `_ and select the Aiven for Apache Kafka® service where you want to create the topic. 2. From the left sidebar, select **Topics**. -3. Select **Add topic** to create a new topic and enter a name for the topic. -4. If required, enable advanced configurations for the topic by toggling the corresponding option. +3. Select **Create topic** to create a new topic and enter a name for the topic. +4. If required, set the advanced configuration option to **Yes**. 5. In the **Topic advanced configuration** section, you can set properties such as the replication factor, number of partitions, and other settings. These settings can be modified later if needed. -6. Select **Add topic**. +6. Select **Create topic**. The new topic will be visible immediately, but may take a few minutes before you can update its settings. diff --git a/docs/products/kafka/howto/create-topics-automatically.rst b/docs/products/kafka/howto/create-topics-automatically.rst index e1aafaa1c4..c2e7ea2ec4 100644 --- a/docs/products/kafka/howto/create-topics-automatically.rst +++ b/docs/products/kafka/howto/create-topics-automatically.rst @@ -19,11 +19,12 @@ Enable automatic topic creation using Aiven Console To enable automatic topic creation through the Aiven Console, follow these steps: -1. Log in to `Aiven Console `_ and select your Aiven for Apache Kafka service. -2. On the **Overview** page, scroll down to **Advanced configuration** and select **Change**. -3. In the **Edit advanced configuration** screen, select **Add configuration option**. -4. Find the ``auto_create_topics_enable`` parameter and set it to true to enable automatic topic creation. -5. Select **Save advanced configuration**. +1. In the `Aiven Console `_, select your project and then choose your Aiven for Apache Kafka® service. +2. In the service page, select **Service settings** from the sidebar. +3. On the **Service settings** page, scroll down to the **Advanced configuration** section, and click **Configure**. +4. In the **Advanced configuration** dialog, click **Add configuration options**. +5. Find the ``auto_create_topics_enable`` parameter and set it to true to enable automatic topic creation. +6. Select **Save configuration**. .. Warning:: diff --git a/docs/products/kafka/howto/datadog-customised-metrics.rst b/docs/products/kafka/howto/datadog-customised-metrics.rst index aa16f8bae0..63b48cc894 100644 --- a/docs/products/kafka/howto/datadog-customised-metrics.rst +++ b/docs/products/kafka/howto/datadog-customised-metrics.rst @@ -1,17 +1,17 @@ Configure Apache Kafka® metrics sent to Datadog =============================================== -When creating a :doc:`Datadog service integration `, you can customise which metrics are sent to the Datadog endpoint using the :doc:`Aiven CLI `. +When creating a `Datadog service integration `_, customize which metrics are sent to the Datadog endpoint using the :doc:`Aiven CLI `. 
-For each Apache Kafka® topic and partition, the following metrics are currently supported: +The following metrics are currently supported for each topic and partition in Apache Kafka®: * ``kafka.log.log_size`` * ``kafka.log.log_start_offset`` * ``kafka.log.log_end_offset`` -.. Tip:: +.. note:: - All the above metrics are tagged with ``topic`` and ``partition`` allowing you to monitor each topic and partition independently. + All metrics are tagged with ``topic`` and ``partition``, enabling independent monitoring of each ``topic`` and ``partition``. Variables --------- @@ -23,41 +23,63 @@ Variable Description ================== ============================================================================ ``SERVICE_NAME`` Aiven for Apache Kafka® service name ------------------ ---------------------------------------------------------------------------- -``INTEGRATION_ID`` ID of the integration between the Aiven for Apache Kafka service and Datadog +``INTEGRATION_ID`` ID of the integration between Aiven for Apache Kafka service and Datadog ================== ============================================================================ -.. Tip:: - The ``INTEGRATION_ID`` parameter can be found by issuing:: - - avn service integration-list SERVICE_NAME +You can find the ``INTEGRATION_ID`` parameter by executing this command: -Customise Apache Kafka® metrics sent to Datadog ------------------------------------------------ +.. code:: + + avn service integration-list SERVICE_NAME -Before customising the metrics, make sure that you have a Datadog endpoint configured and enabled in your Aiven for Apache Kafka service. For details on how to set up the Datadog integration, check the :doc:`dedicated article `. Please note that in all the below parameters a 'comma separated list' has the following format: ``['value0','value1','value2','...']``. +Customize Apache Kafka® metrics for Datadog +---------------------------------------------------- -To customise the metrics sent to Datadog, you can use the ``service integration-update`` passing the following customised parameters: +Before customizing metrics, ensure a Datadog endpoint is configured and enabled in your Aiven for Apache Kafka service. For setup instructions, see :doc:`Send metrics to Datadog `. Format any listed parameters as a comma-separated list: ``['value0', 'value1', 'value2', ...]``. -* ``kafka_custom_metrics``: defining the comma separated list of custom metrics to include (within ``kafka.log.log_size``, ``kafka.log.log_start_offset`` and ``kafka.log.log_end_offset``) -* ``include_topics``: defining the comma separated list of topics to include -.. Tip:: +To customize the metrics sent to Datadog, you can use the ``service integration-update`` passing the following customized parameters: - By default, all topics are included. 
- -* ``exclude_topics``: defining the comma separated list of topics to exclude -* ``include_consumer_groups``: defining the comma separated list of consumer groups to include -* ``exclude_consumer_groups``: defining the comma separated list of consumer groups to include +* ``kafka_custom_metrics``: defining the comma-separated list of custom metrics to include (within ``kafka.log.log_size``, ``kafka.log.log_start_offset`` and ``kafka.log.log_end_offset``) +For example, to send the ``kafka.log.log_size`` and ``kafka.log.log_end_offset`` metrics, execute the following code: -As example to sent the ``kafka.log.log_size`` and ``kafka.log.log_end_offset`` metrics for ``topic1`` and ``topic2`` execute the following code:: +.. code:: avn service integration-update \ -c kafka_custom_metrics=['kafka.log.log_size','kafka.log.log_end_offset'] \ - -c include_topics=['topic1','topic2'] \ INTEGRATION_ID -Once the update is successful and metrics have been collected and pushed, you should see them in your Datadog explorer. -.. seealso:: Learn more about :doc:`/docs/integrations/datadog`. \ No newline at end of file +After you successfully update and the metrics are collected and sent to Datadog, you can view them in your Datadog explorer. + +.. seealso:: Learn more about :doc:`Datadog and Aiven `. + + +Customize Apache Kafka® consumer metrics for Datadog +----------------------------------------------------- + +`Kafka Consumer Integration `_ collects metrics for message offsets. To customize the metrics sent from this Datadog integration to Datadog, you can use the ``service integration-update`` passing the following customized parameters: + +* ``include_topics``: Specify a comma-separated list of topics to include. + + .. Note:: + + By default, all topics are included. + +* ``exclude_topics``: Specify a comma-separated list of topics to exclude. +* ``include_consumer_groups``: Specify a comma-separated list of consumer groups to include. +* ``exclude_consumer_groups``: Specify a comma-separated list of consumer groups to exclude. + +For example, to include topics ``topic1`` and ``topic2``, and exclude ``topic3``, execute the following code: + +.. code:: + + + avn service integration-update \ + -c kafka_custom_metrics="['kafka.log.log_size','kafka.log.log_end_offset']" \ + -c include_topics="['topic1','topic2']" \ + INTEGRATION_ID + +After you successfully update and the metrics are collected and sent to Datadog, you can view them in your Datadog explorer. diff --git a/docs/products/kafka/howto/enable-kafka-tiered-storage.rst b/docs/products/kafka/howto/enable-kafka-tiered-storage.rst index 1f6337b2df..ba9300a180 100644 --- a/docs/products/kafka/howto/enable-kafka-tiered-storage.rst +++ b/docs/products/kafka/howto/enable-kafka-tiered-storage.rst @@ -1,10 +1,12 @@ Enable tiered storage for Aiven for Apache Kafka® ===================================================== -Learn how to enable tiered storage capability of Aiven for Apache Kafka®. This topic provides step-by-step instructions for maximizing storage efficiency using either the `Aiven console `_ or the :doc:`Aiven CLI `. .. important:: - Aiven for Apache Kafka® tiered storage is a :doc:`limited availability feature `. If you're interested in trying out this feature, contact the sales team at sales@Aiven.io. + Aiven for Apache Kafka® tiered storage is an :doc:`early availability feature `. If you're interested in trying out this feature, contact the sales team at sales@Aiven.io. 
+ +Tiered storage significantly improves the storage efficiency of your Aiven for Apache Kafka® service. You can enable this feature for your service using either the `Aiven console `_ or the :doc:`Aiven CLI `. + Prerequisites -------------- @@ -18,43 +20,54 @@ Enable tiered storage via Aiven Console Follow these steps to enable tiered storage for your service using the Aiven Console. 1. Access the `Aiven console `_, and select your project. -2. Create a new Aiven for Apache Kafka service or choose an existing one. +2. Either create a new Aiven for Apache Kafka service or select an existing one. - - If you are :doc:`creating a new service `: + - For :doc:`a new service `: a. On the **Create Apache Kafka® service** page, scroll down to the **Tiered storage** section. - b. To enable tiered storage, select the **Enable tiered storage** toggle. + b. Turn on the **Enable tiered storage** toggle to activate tiered storage. c. In the **Service summary**, you can view the pricing for tiered storage. - - If you are using an existing service: + - For an existing service: - a. Go to the service's **Overview** page, scroll down to the **Tiered storage** section. - b. To enable tiered storage, select the **Enable tiered storage** toggle. - - -3. Select the **Activate tiered storage** to save your settings and enable tiered storage for the service. + a. Go to the service's **Overview** page, select **Service settings** from the sidebar. + b. In the Service plan section, click **Enable tiered storage** to activate it. + +3. Click **Activate tiered storage** to confirm your settings and turn on tiered storage for your service. + +Following the activation of tiered storage for your service and :doc:`topics `, you can track usage and costs in the :doc:`Tiered storage overview ` section. -Once you have enabled tiered storage and it's in use, access the :doc:`Tiered storage overview ` on the left sidebar to get an overview of the overall usage and cost details. .. note:: - If tiered storage is not yet enabled for your service, clicking **Tiered storage** from the sidebar provides you with the option to activate tiered storage. + You can also enable tiered storage by clicking **Tiered storage** in the sidebar if it's not already active for your service. .. warning:: - If you power off a service with tiered storage active, all remote data will be permanently deleted. You will not be billed for tiered storage usage during the powered-off period. + If you power off a service with tiered storage active, you will permanently lose all remote data. However, you will not be charged for tiered storage while the service is off. -Configuring default retention policies at service-level +Configure default retention policies at service-level ````````````````````````````````````````````````````````````````````````````` 1. Access `Aiven console `_, select your project, and choose your Aiven for Apache Kafka service. -2. On the **Overview** page, navigate to **Advanced configuration** and select **Change**. -3. In the **Edit advanced configuration** view, choose **Add configuration option**. -4. To set the retention policy for Aiven for Apache Kafka tiered storage, select ``kafka.log_local_retention_ms`` for time-specific retention or ``kafka.log_local_retention_bytes`` for size-specific retention. -5. Select **Save advanced configuration** to apply your changes. +2. In the service page, select **Service settings** from the sidebar. +3. 
On the **Service settings** page, scroll down to the **Advanced configuration** section, and click **Configure**. +4. In the **Advanced configuration** dialog, click **Add configuration option**. +5. To define the retention policy for Aiven for Apache Kafka tiered storage, choose either of these options: + + * Find ``kafka.log_local_retention_ms`` and set the value to define the retention period in milliseconds for time-based retention. + + * Find ``kafka.log_local_retention_bytes`` and set the value to define the retention limit in bytes for size-based retention. + +6. Click **Save configuration** to apply your changes. Additionally, you can configure the retention policies from the :ref:`Tiered storage overview ` page. +(Optional) configure client-side parameter +------------------------------------------- +For optimal performance and reduced risk of broker interruptions when using tiered storage, it is recommended to update the client-side parameter ``fetch.max.wait.ms`` from its default value of 500ms to 5000ms. + + Enable tiered storage via Aiven CLI ----------------------------------------- Follow these steps to enable tiered storage for your Aiven for Apache Kafka service using the :doc:`Aiven CLI `: @@ -90,13 +103,11 @@ Follow these steps to enable tiered storage for your Aiven for Apache Kafka serv -c tiered_storage.enabled=true - - In this command: -* ``--project demo-kafka-project`` refers to the name of your project. In this example, it's ``demo-kafka-project``. -* ``demo-kafka-service`` denotes the Aiven for Apache Kafka® service you intend to update. -* ``-c tiered_storage.enabled=true`` is the configuration flag that activates tiered storage for your Aiven for Apache Kafka service. +* ``--project demo-kafka-project``: Replace ``demo-kafka-project`` with your project name. +* ``demo-kafka-service``: Specify the Aiven for Apache Kafka service you intend to update. +* ``-c tiered_storage.enabled=true``: Configuration flag that activates tiered storage for your Aiven for Apache Kafka service. diff --git a/docs/products/kafka/howto/enable-oidc.rst b/docs/products/kafka/howto/enable-oidc.rst index a86f807340..7cf5480cbb 100644 --- a/docs/products/kafka/howto/enable-oidc.rst +++ b/docs/products/kafka/howto/enable-oidc.rst @@ -10,7 +10,7 @@ Aiven for Apache Kafka integrates with a wide range of OpenID Connect identity p Before proceeding with the setup, ensure you have: -* :doc:`Aiven for Apache Kafka® ` service running. +* :doc:`Aiven for Apache Kafka® ` service running. * **Access to an OIDC provider**: Options include Auth0, Okta, Google Identity Platform, Azure, or any other OIDC compliant provider. * Required configuration details from your OIDC provider: @@ -27,10 +27,11 @@ Before proceeding with the setup, ensure you have: Enable OAuth2/OIDC via Aiven Console ------------------------------------------------------- -1. In `Aiven Console `_, select your project and then choose your Aiven for Apache Kafka® service. -2. On the **Overview** page, scroll down to **Advanced configuration** and select **Configure**. -3. In the **Advanced configuration** screen, select **Add configuration options**. -4. Set the following OIDC parameters: +1. In the `Aiven Console `_, select your project and then choose your Aiven for Apache Kafka® service. +2. In the service page, select **Service settings** from the sidebar. +3. On the **Service settings** page, scroll down to the **Advanced configuration** section, and click **Configure**. +4. 
In the **Advanced configuration** dialog, select **Add configuration options**. +5. Set the following OIDC parameters: * ``kafka.sasl_oauthbearer_jwks_endpoint_url`` @@ -60,7 +61,7 @@ Enable OAuth2/OIDC via Aiven Console Adjusting OIDC configurations, such as enabling, disabling, or modifying settings, can lead to a rolling restart of Kafka brokers. As a result, the brokers may temporarily operate with different configurations. To minimize any operational disruptions, plan to implement these changes during a maintenance window or at a time that ensures a minimal impact on your operations. -5. Select **Save configurations** to save your changes +6. Select **Save configurations** to save your changes @@ -91,6 +92,6 @@ For detailed explanations on the OIDC parameters, refer to the :ref:`console-aut -See also --------- +Related pages +------------- - Enable OAuth2/OIDC support for Apache Kafka® REST proxy \ No newline at end of file diff --git a/docs/products/kafka/howto/enabled-consumer-lag-predictor.rst b/docs/products/kafka/howto/enabled-consumer-lag-predictor.rst index 1866cc85e8..7361e26a3a 100644 --- a/docs/products/kafka/howto/enabled-consumer-lag-predictor.rst +++ b/docs/products/kafka/howto/enabled-consumer-lag-predictor.rst @@ -13,20 +13,22 @@ Prerequisites Before you start, ensure you have the following: - Aiven account. -- :doc:`Aiven for Apache Kafka® ` service running. +- :doc:`Aiven for Apache Kafka® ` service running. - :doc:`Prometheus integration ` set up for your Aiven for Apache Kafka for extracting metrics. - Necessary permissions to modify service configurations. Enable via Aiven Console ---------------------------------------------------- -1. In `Aiven Console `_, select your project and then choose your Aiven for Apache Kafka® service. +1. In the `Aiven Console `_, select your project and then choose your Aiven for Apache Kafka® service. -2. On the **Overview** page, scroll down to **Advanced configuration** and select **Configure**. +2. In the service page, click **Service settings** on the sidebar. -3. In the **Advanced configuration** screen, select **Add configuration options**. +3. Scroll to the **Advanced configuration** section, and select **Configure**. -4. In the add configuration options: +4. In the **Advanced configuration** screen, select **Add configuration options**. + +5. In the add configuration options: - Find and set ``kafka_lag_predictor.enabled`` to **Enabled** position. This enables the lag predictor to compute predictions for all consumer groups across all topics. - Find ``kafka_lag_predictor.group_filters`` and enter the desired consumer group pattern. This specifies which consumer groups to consider during lag prediction calculations. @@ -35,7 +37,7 @@ Enable via Aiven Console By default, the consumer lag predictor calculates the lag of all consumer groups. To restrict the calculation to specific groups, use the ``kafka_lag_predictor.group_filters`` option. -5. Select **Save configurations** to save your changes and enable consumer lag prediction. +6. Select **Save configuration** to save your changes and enable consumer lag prediction. Enable via Aiven CLI ------------------------------------------------ @@ -100,10 +102,10 @@ After enabling the consumer lag predictor, you can use Prometheus to access and * - Metric - Type - Description - * - ``kafka_lag_predictor_topic_produced_records`` + * - ``kafka_lag_predictor_topic_produced_records_total`` - Counter - Represents the total count of records produced. 
- * - ``kafka_lag_predictor_group_consumed_records`` + * - ``kafka_lag_predictor_group_consumed_records_total`` - Counter - Represents the total count of records consumed. * - ``kafka_lag_predictor_group_lag_predicted_seconds`` diff --git a/docs/products/kafka/howto/fake-sample-data.rst b/docs/products/kafka/howto/fake-sample-data.rst index e65708ebcd..d9ee79684f 100644 --- a/docs/products/kafka/howto/fake-sample-data.rst +++ b/docs/products/kafka/howto/fake-sample-data.rst @@ -7,7 +7,7 @@ Learning to work with streaming data is much more fun with data, so to get you s The following example is based on `Docker `_ images, which require `Docker `_ or `Podman `_ to be executed. -The following example assumes you have an Aiven for Apache Kafka® service running. You can create one following the :doc:`dedicated instructions `. +The following example assumes you have an Aiven for Apache Kafka® service running. You can create one following the :doc:`dedicated instructions `. Fake data generator on Docker @@ -17,9 +17,9 @@ To learn data streaming, you need a continuous flow of data and for that you can 1. Clone the repository: -.. code:: + .. code:: - git clone https://github.com/aiven/fake-data-producer-for-apache-kafka-docker + git clone https://github.com/aiven/fake-data-producer-for-apache-kafka-docker 2. Copy the file ``conf/env.conf.sample`` to ``conf/env.conf`` diff --git a/docs/products/kafka/howto/integrate-service-logs-into-kafka-topic.rst b/docs/products/kafka/howto/integrate-service-logs-into-kafka-topic.rst index 176fd17572..d27356899f 100644 --- a/docs/products/kafka/howto/integrate-service-logs-into-kafka-topic.rst +++ b/docs/products/kafka/howto/integrate-service-logs-into-kafka-topic.rst @@ -36,9 +36,9 @@ Test the integration (with Aiven for Apache Kafka) 2. Select **Topics** from the left sidebar and locate your topic you specified to send logs. 3. From the **Topic info** screen, select **Messages**. - .. note:: - - Alternatively, you can access the messages for a topic by selecting the ellipsis in the row of the topic and choosing **Topic messages**. + .. note:: + + Alternatively, you can access the messages for a topic by selecting the ellipsis in the row of the topic and choosing **Topic messages**. 4. In the **Messages** screen, select **Fetch Messages** to view the log entries that were sent from your source service. 5. To see the messages in JSON format, use the **FORMAT** drop-down menu and select *json*. diff --git a/docs/products/kafka/howto/kafka-conduktor.rst b/docs/products/kafka/howto/kafka-conduktor.rst index 902c3af577..646985051b 100644 --- a/docs/products/kafka/howto/kafka-conduktor.rst +++ b/docs/products/kafka/howto/kafka-conduktor.rst @@ -3,7 +3,7 @@ Connect to Apache Kafka® with Conduktor `Conduktor `_ is a friendly user interface for Apache Kafka, and it works well with Aiven. In fact, there is built-in support for setting up the connection. You will need to add the CA certificate for each of your Aiven projects to Conduktor before you can connect, this is outlined in the steps below. -1. Visit the **Service overview** page for your Aiven for Apache Kafka® service (the :doc:`/docs/products/kafka/getting-started` page is a good place for more information about creating a new service if you don't have one already). +1. Visit the **Service overview** page for your Aiven for Apache Kafka® service (the :doc:`/docs/products/kafka/get-started` page is a good place for more information about creating a new service if you don't have one already). 2. 
Download the **Access Key**, **Access Certificate** and **CA Certificate** (if you didn't have that already) into a directory on your computer. diff --git a/docs/products/kafka/howto/kafka-klaw.rst b/docs/products/kafka/howto/kafka-klaw.rst index f9a3a90c8c..28fb29879f 100644 --- a/docs/products/kafka/howto/kafka-klaw.rst +++ b/docs/products/kafka/howto/kafka-klaw.rst @@ -9,7 +9,7 @@ Prerequisites ------------- To connect Aiven for Apache Kafka® and Klaw, you need to have the following setup: -* A running Aiven for Apache Kafka® service. See :doc:`Getting started with Aiven for Apache Kafka ` for more information. +* A running Aiven for Apache Kafka® service. See :doc:`Getting started with Aiven for Apache Kafka ` for more information. * A running Klaw cluster. See `Run Klaw from the source `_ for more information. * Configured :doc:`Java keystore and truststore containing the service SSL certificates `. diff --git a/docs/products/kafka/howto/kafka-prometheus-privatelink.rst b/docs/products/kafka/howto/kafka-prometheus-privatelink.rst new file mode 100644 index 0000000000..e621a5aa9c --- /dev/null +++ b/docs/products/kafka/howto/kafka-prometheus-privatelink.rst @@ -0,0 +1,84 @@ +Configure Prometheus for Aiven for Apache Kafka® using Privatelink +==================================================================== + +You can integrate Prometheus with your Aiven for Apache Kafka® service using Privatelink for secure monitoring. This setup uses a Privatelink load balancer, which allows for efficient service discovery of Apache Kafka nodes and enables you to connect to your Aiven for Apache Kafka service using a private endpoint in your network or VPCs. + + +Prerequisites +------------- + +Before you start, ensure you have the following: + +- :doc:`Aiven for Apache Kafka® ` service running. +- :doc:`Prometheus integration ` set up for your Aiven for Apache Kafka for extracting metrics. +- Necessary permissions to modify service configurations. + + +Configuration steps +-------------------- + +Basic configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Begin by configuring Prometheus to scrape metrics from your Aiven for Apache Kafka service. This setup involves specifying various parameters for secure data retrieval. Following is an example configuration: + +.. code-block:: yaml + + scrape_configs: + - job_name: aivenmetrics + scheme: https + tls_config: + insecure_skip_verify: true + basic_auth: + username: + password: + http_sd_configs: + - url: + refresh_interval: 120s + tls_config: + insecure_skip_verify: true + basic_auth: + username: + password: + +**Configuration details**: + +- ``job_name``: Identifies the set of targets, e.g., ``aivenmetrics``. +- ``scheme``: Specifies the protocol, typically ``https``. +- ``tls_config``: Manages TLS settings. + + .. note:: + Setting ``insecure_skip_verify: true`` is crucial, as it permits Prometheus to disregard TLS certificate validation against host IP addresses, facilitating seamless connectivity. + +- ``basic_auth``: Provides authentication credentials for Apache Kafka service access. +- ``http_sd_configs``: Configures HTTP Service Discovery. Includes: + + - ``url``: The URI for Prometheus Privatelink service access. + - ``refresh_interval``: The frequency of target list refresh, e.g., ``120s``. + +.. note:: + The ``basic_auth`` and ``tls_config`` are specified twice - first for scraping the HTTP SD response and then to retrieve service metrics. 
This duplication is necessary because the same authentication and security settings are used to retrieve the service discovery information and scrape the metrics. + +(Optional) Metadata and relabeling +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If your setup involves multiple Privatelink connections, you can leverage Prometheus's relabeling for better target management. This approach allows you to dynamically modify target label sets before scraping. + +To manage metrics from different Privatelink connections, include the ``__meta_privatelink_connection_id`` label in your configuration. This setup helps categorize and filter relevant metrics for each connection. + +.. code-block:: yaml + + relabel_configs: + - source_labels: [__meta_privatelink_connection_id] + regex: 1 + action: keep + + +The ``regex: 1`` in the configuration is a placeholder. Make sure to replace ``1`` with the actual Privatelink connection ID that you wish to monitor. + + + +Related pages +-------------- + +* :doc:`Aiven for Apache Kafka® metrics available via Prometheus ` \ No newline at end of file diff --git a/docs/products/kafka/howto/kafka-sasl-auth.rst b/docs/products/kafka/howto/kafka-sasl-auth.rst index e17348f3bf..c7adcaa7b3 100644 --- a/docs/products/kafka/howto/kafka-sasl-auth.rst +++ b/docs/products/kafka/howto/kafka-sasl-auth.rst @@ -3,15 +3,12 @@ Use SASL authentication with Aiven for Apache Kafka® Aiven offers a selection of :doc:`authentication methods for Apache Kafka® <../concepts/auth-types>`, including `SASL `_ (Simple Authentication and Security Layer). -1. Log in to `Aiven Console `_ and choose your project. -2. From the list of services, choose the Aiven for Apache Kafka service for which you wish to enable SASL. -3. On the **Overview** page of the selected service, scroll down to the **Advanced configuration** section. -4. Select **Change**. -5. Enable the ``kafka_authentication_methods.sasl`` setting, and then select **Save advanced configuration**. - - .. image:: /images/products/kafka/enable-sasl.png - :alt: Enable SASL authentication for Apache Kafka - :width: 100% +1. In the `Aiven Console `_, select your project and then choose your Aiven for Apache Kafka® service. +2. In the service page, select **Service settings** from the sidebar. +3. On the **Service settings** page, scroll down to the **Advanced configuration** section. +4. Click **Configure**. +5. In the **Advanced configuration** dialog, set the ``kafka_authentication_methods.sasl`` toggle to the enabled position. +6. Click **Save configuration**. The **Connection information** at the top of the **Overview** page will now offer the ability to connect via SASL or via Client Certificate. diff --git a/docs/products/kafka/howto/kafka-tiered-storage-get-started.rst b/docs/products/kafka/howto/kafka-tiered-storage-get-started.rst index e29c7510df..40fe498c18 100644 --- a/docs/products/kafka/howto/kafka-tiered-storage-get-started.rst +++ b/docs/products/kafka/howto/kafka-tiered-storage-get-started.rst @@ -2,13 +2,14 @@ Get started with tiered storage for Aiven for Apache Kafka® ==================================================================== +.. important:: + + Aiven for Apache Kafka® tiered storage is a :doc:`limited availability feature `. If you're interested in trying out this feature, contact the sales team at sales@Aiven.io. + Aiven for Apache Kafka®'s tiered storage optimizes resources by keeping recent data—typically the most accessed—on faster local disks. 
As data becomes less active, it's transferred to more economical, slower storage, balancing performance with cost efficiency. For an in-depth understanding of tiered storage, how it works, and its benefits, see :doc:`Tiered storage in Aiven for Apache Kafka® `. -.. important:: - - Aiven for Apache Kafka® tiered storage is a :doc:`limited availability feature `. If you're interested in trying out this feature, contact the sales team at sales@Aiven.io. Enable tiered storage for service ---------------------------------- diff --git a/docs/products/kafka/howto/keystore-truststore.rst b/docs/products/kafka/howto/keystore-truststore.rst index ca509988d2..d6bfd4b00a 100644 --- a/docs/products/kafka/howto/keystore-truststore.rst +++ b/docs/products/kafka/howto/keystore-truststore.rst @@ -7,44 +7,52 @@ right tools to be able to communicate with the Aiven services. Keystores and truststores are password-protected files accessible by the client that interacts with the service. To create these files: -1. Log in to `Aiven Console `_ and select your Aiven for Apache Kafka service. +Access service certificates +`````````````````````````````````````````` +* Log in to `Aiven Console `_ and select your Apache Kafka service. +* Download the **Access Key**, **Access Certificate**, and **CA Certificate**. The files "service.key", "service.cert", and "ca.pem" are necessary for the following steps. -2. Download the **Access Key**, **Access Certificate** and **CA Certificate**. The resulting ``service.key``, ``service.cert`` and ``ca.pem`` are going to be used in the following steps. + .. image:: /images/products/kafka/ssl-certificates-download.png + :alt: Access Key, Access Certificate, and CA Certificate download from Aiven Console - .. image:: /images/products/kafka/ssl-certificates-download.png - :alt: Download the Access Key, Access Certificate and CA Certificate from the Aiven console +Create the keystore +```````````````````````````````````` -3. Use the ``openssl`` utility to create the keystore with the ``service.key`` and - ``service.cert`` files downloaded previously: +* Use the ``openssl`` utility to create a keystore using the downloaded ``service.key`` and ``service.cert``: - .. code:: + .. code-block:: - openssl pkcs12 -export \ - -inkey service.key \ - -in service.cert \ - -out client.keystore.p12 \ + openssl pkcs12 -export \ + -inkey service.key \ + -in service.cert \ + -out client.keystore.p12 \ -name service_key - .. Note:: - The format has to be ``PKCS12`` , which is the default since Java 9. + .. Note:: + Ensure the keystore format is ``PKCS12``, the default since Java 9. -5. Enter a password to protect the keystore and the key, when prompted +* Set a password for the keystore and key when prompted. -6. In the folder where the certificates are stored, use the ``keytool`` utility to create the truststore with the ``ca.pem`` file as input: +Create the truststore +`````````````````````````````` - .. code:: - - keytool -import \ - -file ca.pem \ - -alias CA \ - -keystore client.truststore.jks +* In the directory containing the certificates, use the ``keytool`` utility to create a truststore with the ``ca.pem`` file: + + .. code-block:: -7. Enter a password to protect the truststores, when prompted + keytool -import \ + -file ca.pem \ + -alias CA \ + -keystore client.truststore.jks -8. Reply to ``yes`` to confirm trusting the CA certificate, when prompted +* When prompted, enter a password for the truststore and confirm trust in the CA certificate. 
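Before configuring a client, you can optionally verify both stores with the standard ``keytool`` listing commands. This is only a quick check, assuming the file names and passwords chosen in the steps above:

.. code-block:: bash

   # List the entry created from service.key and service.cert (alias: service_key)
   keytool -list -v -keystore client.keystore.p12 -storetype PKCS12

   # List the CA certificate imported into the truststore (alias: CA)
   keytool -list -keystore client.truststore.jks

Each command prompts for the corresponding password and should report a single entry.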
-The result are the keystore named ``client.keystore.p12`` and truststore named ``client.truststore.jks`` that can be used for client applications configuration. +Resulting configuration files +`````````````````````````````` +The process generates two files: "client.keystore.p12" (keystore) and "client.truststore.jks" (truststore). These files are ready for client configuration. .. Tip:: - You can use :doc:`Aiven CLI ` ``avn service user-kafka-java-creds`` to automate the creation of both the keystore and the truststore. Check the :ref:`dedicated page ` for more details. + Use the :doc:`Aiven CLI ` command ``avn service user-kafka-java-creds`` to automate keystore and truststore creation. For more information, see :ref:`avn_service_user_kafka_java_creds`. + + diff --git a/docs/products/kafka/howto/prevent-full-disks.rst b/docs/products/kafka/howto/prevent-full-disks.rst index 976a01bae2..0a88ba84f7 100644 --- a/docs/products/kafka/howto/prevent-full-disks.rst +++ b/docs/products/kafka/howto/prevent-full-disks.rst @@ -18,11 +18,13 @@ When the disk space is insufficient, and the ACL blocks write operations, you wi Upgrade to a larger service plan ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -#. Login to the `Aiven Console `_ and select your service. +#. In the `Aiven Console `_, select your project and then choose your Aiven for Apache Kafka® service. -#. On the **Overview** page, scroll down to **Service plan** and select **Change plan**. +#. In the service page, select **Service settings** from the sidebar. -#. Select your new service plan and select **Change**. +#. On the **Service settings** page, scroll to **Service plan** and select **Change plan** from the **Actions (…)** menu. + +#. In the **Change service plan** dialog, select your new service plan and select **Change**. This will deploy new nodes with increased disk space. Once the data is migrated from the old nodes to the new ones, disk usage will return to an acceptable level, and write operations will be allowed again. @@ -36,9 +38,9 @@ Follow the steps from our article on :doc:`how to add additional storage to your Delete one or more topics ~~~~~~~~~~~~~~~~~~~~~~~~~ -#. Login to the `Aiven Console `__ and select your service. +#. In the `Aiven Console `_, select your project and then choose your Aiven for Apache Kafka® service. -#. Select **Topics** from the left sidebar. +#. Select **Topics** from the sidebar. #. Select the topic you want to remove, and in the **Topic info** screen, select **Remove**. @@ -53,8 +55,6 @@ You can also use the CLI command :doc:`avn cli delete-topic `. +Another way to make more space available without deleting an entire topic is to reduce the retention time or size for one or more topics. If +you know how old the oldest messages are in a topic, you can lower the retention time for the topic to make more space available. Follow the instructions :doc:`to change retention period `. diff --git a/docs/products/kafka/howto/provectus-kafka-ui.rst b/docs/products/kafka/howto/provectus-kafka-ui.rst index 961b5fd317..69c6f9cb03 100644 --- a/docs/products/kafka/howto/provectus-kafka-ui.rst +++ b/docs/products/kafka/howto/provectus-kafka-ui.rst @@ -40,9 +40,11 @@ Since container for Provectus® UI for Apache Kafka® uses non-root user, to avo chmod 700 SSL_STORE_FOLDER -3. Copy secrets there (replace the ``SSL_KEYSTORE_FILE_NAME`` and ``SSL_TRUSTSTORE_FILE_NAME`` with the keystores and truststores file names):: +3. 
Copy secrets there (replace the ``SSL_KEYSTORE_FILE_NAME`` and ``SSL_TRUSTSTORE_FILE_NAME`` with the keystore and truststore file names): - cp SSL_KEYSTORE_FILE_NAME SSL_TRUSTSTORE_FILE_NAME SSL_STORE_FOLDER + .. code:: + + cp SSL_KEYSTORE_FILE_NAME SSL_TRUSTSTORE_FILE_NAME SSL_STORE_FOLDER 4. Give read permissions for secret files for everyone: diff --git a/docs/products/kafka/howto/schema-registry.rst b/docs/products/kafka/howto/schema-registry.rst index 9c0b92d660..12f3685c3d 100644 --- a/docs/products/kafka/howto/schema-registry.rst +++ b/docs/products/kafka/howto/schema-registry.rst @@ -70,13 +70,17 @@ Once the schema is defined, you need to compile it, and it can be done **manuall Manual schema compilation ~~~~~~~~~~~~~~~~~~~~~~~~~~ -In case of manual schema compilation, download ``avro-tools-1.11.0.jar`` from https://avro.apache.org/releases.html or via maven using the following:: - mvn org.apache.maven.plugins:maven-dependency-plugin:2.8:get -Dartifact=org.apache.avro:avro-tools:1.11.0:jar -Ddest=avro-tools-1.11.0.jar +In case of manual schema compilation, download ``avro-tools-1.11.0.jar`` from https://avro.apache.org/releases.html or via maven using the following: .. code:: -The schema defined in the previous step, can be now compiled to produce a Java class ``ClickRecord.java`` in the ``io.aiven.avro.example`` package (taken from the ``namespace`` parameter):: + mvn org.apache.maven.plugins:maven-dependency-plugin:2.8:get -Dartifact=org.apache.avro:avro-tools:1.11.0:jar -Ddest=avro-tools-1.11.0.jar - java -jar avro-tools-1.11.0.jar compile schema ClickRecord.avsc . +The schema defined in the previous step can now be compiled to produce a Java class ``ClickRecord.java`` in the ``io.aiven.avro.example`` package (taken from the ``namespace`` parameter): + +.. code:: + + java -jar avro-tools-1.11.0.jar compile schema ClickRecord.avsc . .. Note:: diff --git a/docs/products/kafka/howto/tiered-storage-overview-page.rst b/docs/products/kafka/howto/tiered-storage-overview-page.rst index c8860b4696..4f72130664 100644 --- a/docs/products/kafka/howto/tiered-storage-overview-page.rst +++ b/docs/products/kafka/howto/tiered-storage-overview-page.rst @@ -1,20 +1,20 @@ Tiered storage overview in Aiven Console ======================================== -Aiven for Apache Kafka® offers a comprehensive overview of tiered storage, allowing you to understand its usage and make informed decisions. This overview provides insights into various aspects of tiered storage, including billing, settings, and storage details. +Aiven for Apache Kafka® offers a comprehensive overview of tiered storage, allowing you to understand its usage and make informed decisions. This overview provides insights into various aspects of tiered storage, including hourly billing, settings, and storage details. .. important:: - Aiven for Apache Kafka® tiered storage is a :doc:`limited availability feature `. If you're interested in trying out this feature, contact the sales team at sales@Aiven.io. + Aiven for Apache Kafka® tiered storage is an :doc:`early availability feature `. If you're interested in trying out this feature, contact the sales team at sales@Aiven.io. Access tiered storage overview -------------------------------- -1. In the `Aiven console `_, choose your project and select your Aiven for Apache Kafka service. +1. Log in to the `Aiven console `_, choose your project and select your Aiven for Apache Kafka service. 2. From the left sidebar, select **Tiered Storage**.
- - If you haven't enabled tiered storage for your service, you'll have the option to enable it. + - If tiered storage is not yet enabled for your service, you will see the option to enable it. - If tiered storage is enabled but not configured for any topics, you have the option to set it up for topics directly. For more details, see :doc:`Enable and configure tiered storage for topics `. 3. Once configured, you can view an overview of tiered storage and its associated details. @@ -25,7 +25,7 @@ Key insights of tiered storage Get a quick snapshot of the essential metrics and details related to tiered storage: -- **Current billing expenses in USD**: Stay informed about your current tiered storage expenses. +- **Current billing expenses in USD**: Displays your tiered storage costs, calculated at hourly rates. - **Forecasted month cost in USD**: Estimate your upcoming monthly costs based on current usage. - **Remote tier usage in bytes**: View the volume of data that has been tiered. - **Storage overview**: View how topics use :ref:`remote storage `. @@ -44,14 +44,14 @@ This section provides an overview of the current local cache details and retenti Modify retention policies ````````````````````````````````` -1. In the **Tiered storage settings** section, select the ellipsis (three dots) and select **Update tiered storage settings**. +1. In the **Tiered storage settings** section, click **Actions (...)** and select **Update tiered storage settings**. 2. Within **Update tiered storage settings** page, adjust the values for: - Local Cache - Default Local Retention Time (ms) - Default Local Retention Bytes -3. Confirm your adjustments by selecting **Save changes**. +3. Click **Save changes**. .. _remote-storage-overview: @@ -60,6 +60,6 @@ Remote storage overview Explore the specifics of your storage usage and configurations: -- **Remote storage usage by topics**: Take a deep dive into how much tiered storage is used per topic. +- **Remote storage usage by topics**: Analyze how much tiered storage each topic uses. - **Filter by topic**: Narrow down your view to specific topics for focused insights. diff --git a/docs/products/kafka/howto/use-zookeeper.rst b/docs/products/kafka/howto/use-zookeeper.rst index 69d0f272e3..fab3384e28 100644 --- a/docs/products/kafka/howto/use-zookeeper.rst +++ b/docs/products/kafka/howto/use-zookeeper.rst @@ -5,11 +5,11 @@ Apache ZooKeeper™ is a crucial component used by Apache Kafka® and Aiven mana In order to change ZooKeeper configuration properties follow these steps: -#. Log in to `Aiven Console `_ and select your service. -#. On the **Overview** page, scroll down to the **Advanced configuration** section. -#. Select **Change** to add configuration option. -#. On the **Edit advanced configuration** screen, select **+ Add configuration option** to add new configurations or modify the values of existing configurations. -#. Select **Save advanced configuration**. +#. In the `Aiven Console `_, select your project and then choose your Aiven for Apache Kafka® service. +#. In the service page, select **Service settings** from the sidebar. +#. On the **Service settings** page, scroll down to the **Advanced configuration** section, and click **Configure**. +#. In the **Advanced configuration** dialog, click **Add configuration option** to add new configurations or modify the values of existing configurations. +#. Click **Save configuration**. The service configuration will be then updated.
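The same advanced configuration options shown in the console dialog can usually be set with the Aiven CLI as well. The following is only a sketch: the service name is a placeholder, and ``kafka_authentication_methods.sasl`` (mentioned earlier for SASL) stands in for whichever option you need to change:

.. code-block:: bash

   # Set one advanced configuration option on an existing service
   avn service update my-kafka-service -c kafka_authentication_methods.sasl=true

   # Review the applied configuration
   avn service get my-kafka-service --json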
diff --git a/docs/products/kafka/kafka-connect.rst b/docs/products/kafka/kafka-connect.rst index 2b46b20b70..5f0645c8c9 100644 --- a/docs/products/kafka/kafka-connect.rst +++ b/docs/products/kafka/kafka-connect.rst @@ -1,19 +1,10 @@ Aiven for Apache Kafka® Connect =============================== -What is Aiven for Apache Kafka® Connect? ----------------------------------------- - Aiven for Apache Kafka® Connect is a fully managed **distributed Apache Kafka® integration component**, deployable in the cloud of your choice. Apache Kafka Connect lets you integrate your existing data sources and sinks with Apache Kafka. With an Apache Kafka Connect connector, you can source data from an existing technology into a topic or sink data from a topic to a target technology by defining the endpoints. - -Why Apache Kafka® Connect? --------------------------- - -Apache Kafka represents the best in class data streaming solution. Apache Kafka Connect allows integrating Apache Kafka with the rest of your data architecture with a configuration file which defines the source and the target of your data. - Source connectors ----------------- @@ -127,7 +118,7 @@ Sink connectors Get started with Aiven for Apache Kafka® Connect ------------------------------------------------ -Take your first steps with Aiven for Apache Kafka Connect by following our :doc:`/docs/products/kafka/kafka-connect/getting-started` article, or browse through our full list of articles: +Take your first steps with Aiven for Apache Kafka Connect by following our :doc:`/docs/products/kafka/kafka-connect/get-started` article, or browse through our full list of articles: .. grid:: 1 2 2 2 diff --git a/docs/products/kafka/kafka-connect/concepts/list-of-connector-plugins.rst b/docs/products/kafka/kafka-connect/concepts/list-of-connector-plugins.rst index 5e316ba9ac..c5f47b1aed 100644 --- a/docs/products/kafka/kafka-connect/concepts/list-of-connector-plugins.rst +++ b/docs/products/kafka/kafka-connect/concepts/list-of-connector-plugins.rst @@ -17,7 +17,7 @@ Source connectors enable the integration of data from an existing technology int * `Debezium for MySQL `__ -* :doc:`Debezium for PostgreSQL® <../howto/debezium-source-connector-pg>` +* :doc:`Debezium for PostgreSQL® ` * `Debezium for SQL Server `__ @@ -38,7 +38,7 @@ Sink connectors Sink connectors enable the integration of data from an existing Apache Kafka topic to a target technology. The following is the list of available sink connectors: -* :doc:`Aiven for Apache Kafka® S3 Sink Connector <../howto/s3-sink-connector-aiven>` +* :doc:`Aiven for Apache Kafka® S3 Sink Connector ` * `Confluent Amazon S3 Sink `__ diff --git a/docs/products/kafka/kafka-connect/getting-started.rst b/docs/products/kafka/kafka-connect/get-started.rst similarity index 100% rename from docs/products/kafka/kafka-connect/getting-started.rst rename to docs/products/kafka/kafka-connect/get-started.rst diff --git a/docs/products/kafka/kafka-connect/howto/debezium-source-connector-mongodb.rst b/docs/products/kafka/kafka-connect/howto/debezium-source-connector-mongodb.rst index cb287b625e..a884388bcd 100644 --- a/docs/products/kafka/kafka-connect/howto/debezium-source-connector-mongodb.rst +++ b/docs/products/kafka/kafka-connect/howto/debezium-source-connector-mongodb.rst @@ -69,11 +69,11 @@ The configuration file contains the following entries: * ``tasks.max``: maximum number of tasks to execute in parallel. By default this is 1, the connector can use at most 1 task for each collection defined. 
Replace ``NR_TASKS`` with the amount of parallel task based on the number of input collections. * ``key.converter`` and ``value.converter``: defines the messages data format in the Apache Kafka topic. The ``io.confluent.connect.avro.AvroConverter`` converter pushes messages in Avro format. To store the messages schema we use Aiven's `Karapace schema registry `_ as specified by the ``schema.registry.url`` parameter and related credentials. - .. Note:: + .. Note:: - The ``key.converter`` and ``value.converter`` sections are only needed when pushing data in Avro format. If omitted the messages will be defined in JSON format. + The ``key.converter`` and ``value.converter`` sections are only needed when pushing data in Avro format. If omitted the messages will be defined in JSON format. - The ``USER_INFO`` is **not** a placeholder, no substitution is needed for that parameter. + The ``USER_INFO`` is **not** a placeholder, no substitution is needed for that parameter. Create a Kafka Connect connector with the Aiven Console diff --git a/docs/products/kafka/kafka-connect/howto/debezium-source-connector-pg-node-replacement.rst b/docs/products/kafka/kafka-connect/howto/debezium-source-connector-pg-node-replacement.rst index c30e4e0b29..3d86a165c6 100644 --- a/docs/products/kafka/kafka-connect/howto/debezium-source-connector-pg-node-replacement.rst +++ b/docs/products/kafka/kafka-connect/howto/debezium-source-connector-pg-node-replacement.rst @@ -14,7 +14,9 @@ The connector is able to recover from temporary errors to the database and start Common Debezium errors related to PostgreSQL node replacement ------------------------------------------------------------- -In cases when the Debezium connector can't recover during or after the PostgreSQL node replacements, the following errors are commonly shown in logs:: +In cases when the Debezium connector can't recover during or after the PostgreSQL node replacements, the following errors are commonly shown in logs: + +.. 
code:: # ERROR 1 org.apache.kafka.connect.errors.ConnectException: Could not create PostgreSQL connection diff --git a/docs/products/kafka/kafka-connect/howto/gcp-bigquery-sink-prereq.rst b/docs/products/kafka/kafka-connect/howto/gcp-bigquery-sink-prereq.rst index 88b25ecd8b..9561da649a 100644 --- a/docs/products/kafka/kafka-connect/howto/gcp-bigquery-sink-prereq.rst +++ b/docs/products/kafka/kafka-connect/howto/gcp-bigquery-sink-prereq.rst @@ -15,7 +15,7 @@ To be able to sink data from Apache Kafka® to Google BigQuery via the dedicated Create a new Google service account and generate a JSON service key ------------------------------------------------------------------- -Follow the `instructions `_ to: +Follow the `instructions `_ to: * create a new Google service account * create a JSON service key diff --git a/docs/products/kafka/kafka-connect/howto/gcs-sink-prereq.rst b/docs/products/kafka/kafka-connect/howto/gcs-sink-prereq.rst index 5c15b843a2..2e95e5ae14 100644 --- a/docs/products/kafka/kafka-connect/howto/gcs-sink-prereq.rst +++ b/docs/products/kafka/kafka-connect/howto/gcs-sink-prereq.rst @@ -4,7 +4,7 @@ Configure GCP for a Google Cloud Storage sink connector To be able to sink data from Apache Kafka® to Google Cloud Storage via the dedicated Aiven connector, you need to perform the following steps in the `GCP console `_: * Create a `Google Cloud Storage (GCS) bucket `_ where the data is going to be stored -* Create a new `Google service account and generate a JSON service key `_ +* Create a new `Google service account and generate a JSON service key `_ * Grant the service account access to the GCS bucket .. _gcs-sink-connector-google-bucket: @@ -19,7 +19,7 @@ You can create the GCS bucket using the `dedicated Google cloud console page `_ to: +Follow the `instructions `_ to: * create a new Google service account * create a JSON service key diff --git a/docs/products/kafka/kafka-connect/howto/influx-sink.rst b/docs/products/kafka/kafka-connect/howto/influx-sink.rst index c08e7a694a..0e8db0f842 100644 --- a/docs/products/kafka/kafka-connect/howto/influx-sink.rst +++ b/docs/products/kafka/kafka-connect/howto/influx-sink.rst @@ -24,13 +24,12 @@ Furthermore you need to collect the following information about the target Influ * ``TOPIC_LIST``: The list of topics to sink divided by comma * ``KCQL_TRANSFORMATION``: The KCQL syntax to parse the topic data, should be in the format - :: - - INSERT - INTO INFLUXDB_TABLe_NAME - SELECT LIST_OF_FIELDS - FROM APACHE_KAFKA_TOPIC + .. code:: + INSERT + INTO INFLUXDB_TABLe_NAME + SELECT LIST_OF_FIELDS + FROM APACHE_KAFKA_TOPIC * ``APACHE_KAFKA_HOST``: The hostname of the Apache Kafka service, only needed when using Avro as data format * ``SCHEMA_REGISTRY_PORT``: The Apache Kafka's schema registry port, only needed when using Avro as data format diff --git a/docs/products/kafka/kafka-connect/howto/redis-streamreactor-sink.rst b/docs/products/kafka/kafka-connect/howto/redis-streamreactor-sink.rst index b54178de2d..f872daf3b8 100644 --- a/docs/products/kafka/kafka-connect/howto/redis-streamreactor-sink.rst +++ b/docs/products/kafka/kafka-connect/howto/redis-streamreactor-sink.rst @@ -3,6 +3,10 @@ Create a stream reactor sink connector from Apache Kafka® to Redis®* **The Redis stream reactor sink connector** enables you to move data from **an Aiven for Apache Kafka® cluster** to **a Redis®* database**. The Lenses.io implementation enables you to write `KCQL transformations `_ on the topic data before sending it to the Redis database. +.. 
important:: + + A known issue with the ``GEOADD`` command in version 4.2.0 of the Redis stream reactor sink connector may cause exceptions during initialization under specific configurations. For more information, see the `GitHub issue `_. + .. note:: You can check the full set of available parameters and configuration options in the `connector's documentation `_. diff --git a/docs/products/kafka/kafka-connect/howto/snowflake-sink-prereq.rst b/docs/products/kafka/kafka-connect/howto/snowflake-sink-prereq.rst index b3fdaa9ddf..4f3c7f169d 100644 --- a/docs/products/kafka/kafka-connect/howto/snowflake-sink-prereq.rst +++ b/docs/products/kafka/kafka-connect/howto/snowflake-sink-prereq.rst @@ -15,17 +15,21 @@ Configure a Snowflake key pair authentication The Apache Kafka BigQuery sink connector requires a key pair authentication with a minimum 2048-bit RSA. You need to generate the key pair locally and then upload the public key to Snowflake as defined in the `dedicated documentation `_. The following procedure guides you in the necessary steps: -1. Generate the private key using ``openssl``:: - - openssl genrsa 2048 | openssl pkcs8 -topk8 -inform PEM -out rsa_key.p8 +1. Generate the private key using ``openssl``: + + .. code:: + + openssl genrsa 2048 | openssl pkcs8 -topk8 -inform PEM -out rsa_key.p8 .. Note:: You'll be prompted for the private key password, note it down since it'll be required in the following steps -2. Generate the public key using ``openssl``:: +2. Generate the public key using ``openssl``: - openssl rsa -in rsa_key.p8 -pubout -out rsa_key.pub + .. code:: + + openssl rsa -in rsa_key.p8 -pubout -out rsa_key.pub The above commands create two files: @@ -38,33 +42,40 @@ Create a dedicated Snowflake user and add the public key You need to associate the public key generated at the previous with a new or existing Snowflake user. The following steps define how to create a new user and associate the public key to it. 1. In the Snowflake UI, navigate to the **Worksheets** panel, and ensure to use a role with enough privileges (**SECURITYADMIN** or **ACCOUNTADMIN**) -2. Run the following query to create a user:: - - CREATE USER aiven; +2. Run the following query to create a user: + + .. code:: + + CREATE USER aiven; 3. Copy from ``rsa_key.pub`` all the content between ``-----BEGIN PUBLIC KEY-----`` and ``-----END PUBLIC KEY-----`` and remove the newlines .. Note:: - The generated public key is usually stored on various lines, like:: + The generated public key is usually stored on various lines, like: + + .. code:: -----BEGIN PUBLIC KEY----- YXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXY -----END PUBLIC KEY----- - The output for the following command is the content between ``-----BEGIN PUBLIC KEY-----`` and ``-----END PUBLIC KEY-----`` in one line, like:: + The output for the following command is the content between ``-----BEGIN PUBLIC KEY-----`` and ``-----END PUBLIC KEY-----`` in one line, like: + + .. 
code:: + + YXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + XXXXXXY - YXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXY +3. Run the following query to associate to the newly created ``aiven`` user the public key, by replacing the ``PUBLIC_KEY`` placeholder with the output of the above command: -3. Run the following query to associate to the newly created ``aiven`` user the public key, by replacing the ``PUBLIC_KEY`` placeholder with the output of the above command:: + .. code:: - alter user aiven set RSA_PUBLIC_KEY='PUBLIC_KEY'; + alter user aiven set RSA_PUBLIC_KEY='PUBLIC_KEY'; Create a dedicated Snowflake role and assign the user ----------------------------------------------------- @@ -73,13 +84,17 @@ Creating a new role is strongly suggested to provide the minimal amount of privi 1. In the Snowflake UI, navigate to the **Worksheets** panel, and ensure to use a role with enough privileges (**SECURITYADMIN** or **ACCOUNTADMIN**) -2. Run the following query to create a role:: +2. Run the following query to create a role: - create role aiven_snowflake_sink_connector_role; + .. code:: -3. Run the following query to grant the role to the previously created user:: + create role aiven_snowflake_sink_connector_role; + +3. Run the following query to grant the role to the previously created user: + + .. code:: - grant role aiven_snowflake_sink_connector_role to user aiven; + grant role aiven_snowflake_sink_connector_role to user aiven; 4. Run the following query to alter the user making the new role default when logging in: diff --git a/docs/products/kafka/kafka-mirrormaker.rst b/docs/products/kafka/kafka-mirrormaker.rst index ece98f0ff4..35bbe2954d 100644 --- a/docs/products/kafka/kafka-mirrormaker.rst +++ b/docs/products/kafka/kafka-mirrormaker.rst @@ -18,7 +18,7 @@ Apache Kafka® represents the best in class data streaming solution. Apache Kafk Get started with Aiven for Apache Kafka® MirrorMaker 2 ------------------------------------------------------ -Take your first steps with Aiven for Apache Kafka® MirrorMaker 2 by following our :doc:`/docs/products/kafka/kafka-mirrormaker/getting-started` article, or browse through our full list of articles: +Take your first steps with Aiven for Apache Kafka® MirrorMaker 2 by following our :doc:`/docs/products/kafka/kafka-mirrormaker/get-started` article, or browse through our full list of articles: .. grid:: 1 2 2 2 diff --git a/docs/products/kafka/kafka-mirrormaker/concepts/replication-flow-topics-regex.rst b/docs/products/kafka/kafka-mirrormaker/concepts/replication-flow-topics-regex.rst index 5c84143144..28a50b5a9b 100644 --- a/docs/products/kafka/kafka-mirrormaker/concepts/replication-flow-topics-regex.rst +++ b/docs/products/kafka/kafka-mirrormaker/concepts/replication-flow-topics-regex.rst @@ -1,7 +1,7 @@ Topics included in a replication flow ===================================== -When :doc:`defining a replication flow <../howto/setup-replication-flow>` you need to define which topics in the source Apache Kafka® cluster to include or exclude from the cross-cluster replica. 
+When :doc:`defining a replication flow ` you need to define which topics in the source Apache Kafka® cluster to include or exclude from the cross-cluster replica. The **topics** parameter dictates which topics to include in the replica and can be provided as `list of regular expressions in Java format `_. The same is also valid for the **topics blacklist** parameter defining which topics to exclude. diff --git a/docs/products/kafka/kafka-mirrormaker/getting-started.rst b/docs/products/kafka/kafka-mirrormaker/get-started.rst similarity index 92% rename from docs/products/kafka/kafka-mirrormaker/getting-started.rst rename to docs/products/kafka/kafka-mirrormaker/get-started.rst index cd3abae0c9..07a8fbfede 100644 --- a/docs/products/kafka/kafka-mirrormaker/getting-started.rst +++ b/docs/products/kafka/kafka-mirrormaker/get-started.rst @@ -17,9 +17,9 @@ Create a new Aiven for Apache Kafka MirrorMaker 2 dedicated service: 1. Log in to the `Aiven Console `_ and select the **Aiven for Apache Kafka®** service for which you want to create a dedicated Aiven for Apache Kafka® MirrorMaker 2 service. -2. Scroll down to the **Service Integration** section on the service Overview page and select **Manage integrations**. +2. Click **Integrations** on the sidebar. -3. In the **Integrations** screen, select **Apache Kafka MirrorMaker 2**. +3. On the **Integrations** screen, choose **Apache Kafka MirrorMaker**. 4. Select the **New service** option. diff --git a/docs/products/kafka/kafka-mirrormaker/howto/setup-replication-flow.rst b/docs/products/kafka/kafka-mirrormaker/howto/setup-replication-flow.rst index 0ff9171ff8..2c0bf96b0e 100644 --- a/docs/products/kafka/kafka-mirrormaker/howto/setup-replication-flow.rst +++ b/docs/products/kafka/kafka-mirrormaker/howto/setup-replication-flow.rst @@ -13,16 +13,16 @@ To define a replication flow between a source Apache Kafka cluster and a target .. Note:: - If no Aiven for Apache Kafka MirrorMaker 2 are already defined, :doc:`you can create one in the Aiven console <../getting-started>`. + If no Aiven for Apache Kafka MirrorMaker 2 are already defined, :doc:`you can create one in the Aiven console <../get-started>`. 2. In the service **Overview** screen, scroll to the **Service integrations** section and select **Manage integrations**. 3. If there is no integration for the source/target Apache Kafka cluster, follow these steps to set up the necessary integrations: - - * On the Integrations screen, choose the desired integration from the list for the source Apache Kafka cluster. - * Select an existing Apache Kafka service you want to use as the source/target for the replication flow. - * Provide a cluster alias name (e.g., source-Kafka) to the integration of the Apache Kafka cluster. - * Repeat the above steps to set up the integration for the target Apache Kafka cluster and provide a cluster alias name. + + * On the Integrations screen, choose the desired integration from the list for the source Apache Kafka cluster. + * Select an existing Apache Kafka service you want to use as the source/target for the replication flow. + * Provide a cluster alias name (e.g., source-Kafka) to the integration of the Apache Kafka cluster. + * Repeat the above steps to set up the integration for the target Apache Kafka cluster and provide a cluster alias name. 4. Once that the source and target Apache Kafka clusters are configure, select **Replication flows** from the left sidebar. 
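Once a replication flow is active, a quick way to confirm that data is being mirrored is to list the topics on the target cluster and look for names carrying the source cluster alias as a prefix. This is a sketch only: the host, port, ``client.properties`` (your SSL or SASL client settings), and the ``source-kafka`` alias are placeholders matching the example alias above:

.. code-block:: bash

   # List topics on the target Aiven for Apache Kafka service and keep the mirrored ones
   kafka-topics.sh --bootstrap-server TARGET_KAFKA_HOST:TARGET_KAFKA_PORT \
     --command-config client.properties \
     --list | grep '^source-kafka\.'

Mirrored topics follow the ``{source_cluster_alias}.{source_topic_name}`` naming, for example ``source-kafka.orders``.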
diff --git a/docs/products/kafka/kafka-mirrormaker/reference/terminology.rst b/docs/products/kafka/kafka-mirrormaker/reference/terminology.rst index 7373d88490..d0177d08d8 100644 --- a/docs/products/kafka/kafka-mirrormaker/reference/terminology.rst +++ b/docs/products/kafka/kafka-mirrormaker/reference/terminology.rst @@ -1,14 +1,6 @@ Terminology for Aiven for Apache Kafka® MirrorMaker 2 ===================================================== -.. _Terminology MM2ClusterAlias: - -Cluster alias: The name alias defined in MirrorMaker 2 for a certain Apache Kafka® source or target cluster. - -.. _Terminology MM2ReplicationFlow: - -Replication flow: The flow of data between two Apache Kafka® clusters (called source and target) executed by Apache Kafka® MirrorMaker 2. One Apache Kafka® MirrorMaker 2 service can execute multiple replication flows. - -.. _Terminology MM2RemoteTopics: - -Remote topics: Topics replicated by MirrorMaker 2 from a source Apache Kafka® cluster to a target Apache Kafka® cluster. There is only one source topic for each remote topic. Remote topics refer to the source cluster by the topic name prefix: ``{source_cluster_alias}.{source_topic_name}``. +- **Cluster alias**: The name alias defined in MirrorMaker 2 for a certain Apache Kafka® source or target cluster. +- **Replication flow**: The flow of data between two Apache Kafka® clusters (called source and target) executed by Apache Kafka® MirrorMaker 2. One Apache Kafka® MirrorMaker 2 service can execute multiple replication flows. +- **Remote topics**: Topics replicated by MirrorMaker 2 from a source Apache Kafka® cluster to a target Apache Kafka® cluster. There is only one source topic for each remote topic. Remote topics refer to the source cluster by the topic name prefix: ``{source_cluster_alias}.{source_topic_name}``. diff --git a/docs/products/kafka/karapace.rst b/docs/products/kafka/karapace.rst index 50c7509695..e9e9260487 100644 --- a/docs/products/kafka/karapace.rst +++ b/docs/products/kafka/karapace.rst @@ -1,11 +1,7 @@ Karapace ======== -What is Karapace? ------------------ -`Karapace `_ is an Aiven built open-source Schema Registry for Apache Kafka®, and provides a central repository to store and retrieve schemas. It consists of a **Schema Registry** and a **REST API**. All Kafka services on Aiven support both these features (Schema registry and REST API), and as a user, based on your requirements, you can enable or disable them. -Why Karapace? -------------- +`Karapace `_ is an Aiven built open-source Schema Registry for Apache Kafka®, and provides a central repository to store and retrieve schemas. It consists of a **Schema Registry** and a **REST API**. All Kafka services on Aiven support both these features (Schema registry and REST API), and as a user, based on your requirements, you can enable or disable them. Karapace supports storing schemas in a central repository, which clients can access to serialize and deserialize messages. The schemas also maintain their own version histories and can be checked for compatibility between their different respective versions. It also includes support for JSON Schema, Avro and Protobuf data formats. 
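Because Karapace exposes a Confluent-compatible Schema Registry REST API, a simple way to try it out is to register a schema for a subject with ``curl``. The host, port, credentials, and subject below are placeholders for your own service values, so treat this as an illustration rather than a required step:

.. code-block:: bash

   curl -s -u avnadmin:SERVICE_PASSWORD \
     -H "Content-Type: application/vnd.schemaregistry.v1+json" \
     -X POST \
     --data '{"schema": "{\"type\": \"record\", \"name\": \"ClickRecord\", \"fields\": [{\"name\": \"url\", \"type\": \"string\"}]}"}' \
     "https://KAFKA_HOST:SCHEMA_REGISTRY_PORT/subjects/example-topic-value/versions"

A successful request returns the ID assigned to the schema; later versions registered under the same subject are checked against the configured compatibility level.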
@@ -14,7 +10,7 @@ Karapace REST provides a RESTful interface to your Apache Kafka cluster, allowin Get started with Karapace ------------------------- -Take your first steps Karapace by following our :doc:`/docs/products/kafka/karapace/getting-started` article, or browse through other articles: +Take your first steps Karapace by following our :doc:`/docs/products/kafka/karapace/get-started` article, or browse through other articles: .. grid:: 1 2 2 2 diff --git a/docs/products/kafka/karapace/concepts/acl-definition.rst b/docs/products/kafka/karapace/concepts/acl-definition.rst index b8424716d5..74c597a5cc 100644 --- a/docs/products/kafka/karapace/concepts/acl-definition.rst +++ b/docs/products/kafka/karapace/concepts/acl-definition.rst @@ -73,5 +73,5 @@ The following table provides you with examples: The user that manages the ACLs is a superuser with write access to everything in the schema registry. In the Aiven Console, the superuser can view and modify all schemas in the Schema tab of a Kafka service. The superuser and its ACL entries are not visible in the Console but are added automatically by the Aiven platform. -The schema registry authorization feature enabled in :doc:`Karapace schema registry ` allows you to both authenticate the user, and additionally grant or deny access to individual `Karapace schema registry REST API endpoints `_ and filter the content the endpoints return. +The schema registry authorization feature enabled in :doc:`Karapace schema registry ` allows you to both authenticate the user, and additionally grant or deny access to individual `Karapace schema registry REST API endpoints `_ and filter the content the endpoints return. diff --git a/docs/products/kafka/karapace/getting-started.rst b/docs/products/kafka/karapace/get-started.rst similarity index 100% rename from docs/products/kafka/karapace/getting-started.rst rename to docs/products/kafka/karapace/get-started.rst diff --git a/docs/products/kafka/karapace/howto/enable-karapace.rst b/docs/products/kafka/karapace/howto/enable-karapace.rst index 3525bece24..39e53f7c0f 100644 --- a/docs/products/kafka/karapace/howto/enable-karapace.rst +++ b/docs/products/kafka/karapace/howto/enable-karapace.rst @@ -1,17 +1,22 @@ -Enable Karapace schema registry and REST APIs +Enable Karapace schema registry and REST APIs ============================================= -To enable **Karapace schema registry** and **REST APIs** on Aiven for Apache Kafka® from the Aiven Console, follow these steps: +To enable **Karapace schema registry** and **REST APIs** on Aiven for Apache Kafka® from the Aiven Console, follow these steps: -1. In the `Aiven Console `_, click on the service to view its overview screen. -2. Look for **Schema Registry (Karapace)** or **Apache Kafka REST API (Karapace)**, and enable the setting for either one of the features based on your requirements. +1. In the `Aiven Console `_, select your project and then choose your Aiven for Apache Kafka® service. +2. Click **Service settings** on the sidebar. +3. In the **Service management** section, click **Actions** (**...**). +4. From the dropdown menu, enable the setting for either or both of the features based on your requirements: -To learn more about Karapace and its features, see the `Karapace homepage `_ and `GitHub project `_. + a. Click **Enable REST API (Karapace)**. Confirm your choice in the dialog by clicking **Enable**. + b. Click **Enable Schema Registry (Karapace)**. Confirm your choice in the dialog by clicking **Enable**. -.. 
tip:: - If you use any of our automation or other integrations, the parameters to enable schema registry or REST APIs on your service are named ``schema_registry`` and ``kafka_rest``. +To learn more about Karapace and its features, visit the `Karapace homepage <https://www.karapace.io>`_ and the `Karapace GitHub project <https://github.com/aiven/karapace>`_. + +.. tip:: + For automation or integration setups, use the parameters ``schema_registry`` and ``kafka_rest`` to enable the schema registry or REST APIs on your service. More resources -------------- -For more information on how to setup Karapace with Aiven for Apache Kafka® using `Aiven Terraform Provider `_, see `Apache Kafka® with Karapace Schema Registry `_. - +For instructions on setting up Karapace with Aiven for Apache Kafka® using `Aiven Terraform Provider `_, see `Apache Kafka® with Karapace Schema Registry `_. + \ No newline at end of file diff --git a/docs/products/kafka/karapace/howto/enable-oauth-oidc-kafka-rest-proxy.rst b/docs/products/kafka/karapace/howto/enable-oauth-oidc-kafka-rest-proxy.rst index 1e2fcbef47..376d2b3685 100644 --- a/docs/products/kafka/karapace/howto/enable-oauth-oidc-kafka-rest-proxy.rst +++ b/docs/products/kafka/karapace/howto/enable-oauth-oidc-kafka-rest-proxy.rst @@ -36,7 +36,7 @@ To establish OAuth2/OIDC authentication for the Karapace REST proxy, complete th Prerequisites ``````````````` -* :doc:`Aiven for Apache Kafka® ` service running with :doc:`OAuth2/OIDC enabled `. +* :doc:`Aiven for Apache Kafka® ` service running with :doc:`OAuth2/OIDC enabled `. * :doc:`Karapace schema registry and REST APIs enabled `. * Ensure access to an OIDC-compliant provider, such as Auth0, Okta, Google Identity Platform, or Azure. diff --git a/docs/products/kafka/karapace/howto/enable-schema-registry-authorization.rst b/docs/products/kafka/karapace/howto/enable-schema-registry-authorization.rst index 84fe3bdb47..d974d27547 100644 --- a/docs/products/kafka/karapace/howto/enable-schema-registry-authorization.rst +++ b/docs/products/kafka/karapace/howto/enable-schema-registry-authorization.rst @@ -7,9 +7,11 @@ Most Aiven for Apache Kafka® services will automatically have :doc:`schema regi avn service update --enable-schema-registry-authorization SERVICE_NAME -2. You can similarly disable the Karapace schema registry authorization using:: - avn service update --disable-schema-registry-authorization SERVICE_NAME +2. You can similarly disable the Karapace schema registry authorization using: - avn service update --disable-schema-registry-authorization SERVICE_NAME + .. code:: + + avn service update --disable-schema-registry-authorization SERVICE_NAME .. warning:: Enabling Karapace schema registry authorization can disrupt access for users if the access control rules have not been configured to allow this. For more information, see :doc:`Manage Karapace schema registry authorization `. diff --git a/docs/products/kafka/reference/kstream-data-write-issue.rst b/docs/products/kafka/reference/kstream-data-write-issue.rst deleted file mode 100644 index 9293ebaf9d..0000000000 --- a/docs/products/kafka/reference/kstream-data-write-issue.rst +++ /dev/null @@ -1,21 +0,0 @@ -Resolving data write issue on Apache Kafka® Streams 3.6.0 -========================================================= - -Issue description ------------------- -If you are encountering an ``UNKNOWN_SERVER_ERROR`` while producing data using Apache Kafka® Streams version 3.6.0, it may result in the prevention of data writing.
This issue has been identified and logged in the Apache Kafka® project's issue tracker with the reference: `Kafka-15653 `_. - -This could lead to errors in broker logs as, - -.. code-block:: bash - - "Error processing append operation on partition XXXX (kafka.server.ReplicaManager) java.lang.NullPointerException." - - -Solution --------- -To address this issue, it is recommended to upgrade your Apache Kafka® Streams clients to version 3.6.1. This version contains the necessary fixes to resolve the ``UNKNOWN_SERVER_ERROR``. - - -.. Note:: - If the issue persists even after upgrading Apache Kafka® Streams to 3.6.1 version, please reach out to our support team for further assistance. \ No newline at end of file diff --git a/docs/products/m3db.rst b/docs/products/m3db.rst index 99f33d4314..a923cfe92d 100644 --- a/docs/products/m3db.rst +++ b/docs/products/m3db.rst @@ -1,18 +1,11 @@ Aiven for M3 ============ -What is Aiven for M3? ---------------------- - Aiven for M3 is a fully managed **distributed time series database**, deployable in the cloud of your choice which can bring unlimited scalability and high-availability to your monitoring environment and other time series applications. Aiven for M3 consists of ``n`` number of **M3DB** and **M3 Coordinator** pairs (where ``n`` is the number of nodes as chosen for your Aiven plan). - -Why M3? -------- - -M3 is a specialized time series store which is a great choice if your organization has a very large volume of metrics to handle, and it can be used as part of your observability solution. It is optimized for storing and serving time series through associated pairs of times and values. It also provides a reverse index of time series. +M3 is a great choice if your organization has a very large volume of metrics to handle, and it can be used as part of your observability solution. It is optimized for storing and serving time series through associated pairs of times and values. It also provides a reverse index of time series. .. note:: Aiven offers M3 because we ourselves needed a solution that would work with the size of our own metrics - and we love it! @@ -23,7 +16,7 @@ Read more about `the M3 components `_ Get started with Aiven for M3 ----------------------------- -Take your first steps with Aiven for M3 by following our :doc:`/docs/products/m3db/getting-started` article, or browse through our full list of articles: +Take your first steps with Aiven for M3 by following our :doc:`/docs/products/m3db/get-started` article, or browse through our full list of articles: .. grid:: 1 2 2 2 diff --git a/docs/products/m3db/concepts/scaling-m3.rst b/docs/products/m3db/concepts/scaling-m3.rst index d703d58885..42ec05c23e 100644 --- a/docs/products/m3db/concepts/scaling-m3.rst +++ b/docs/products/m3db/concepts/scaling-m3.rst @@ -70,7 +70,7 @@ Read more about this configuration `in the M3DB documentation ` that your namespace configuration requires. To increase the number of total blocks available, we recommend increasing the number of nodes in your setup. +There is a limit to the number of files that each Aiven M3DB node can support. The number of files increases with the number of :doc:`block shards ` that your namespace configuration requires. To increase the number of total blocks available, we recommend increasing the number of nodes in your setup. 
CPU usage --------- diff --git a/docs/products/m3db/getting-started.rst b/docs/products/m3db/get-started.rst similarity index 100% rename from docs/products/m3db/getting-started.rst rename to docs/products/m3db/get-started.rst diff --git a/docs/products/m3db/howto/grafana.rst b/docs/products/m3db/howto/grafana.rst index e5e34badf5..382cff9f3a 100644 --- a/docs/products/m3db/howto/grafana.rst +++ b/docs/products/m3db/howto/grafana.rst @@ -7,9 +7,9 @@ Integrate M3DB and Grafana -------------------------- 1. Log into `Aiven Console `_ and select your Aiven for M3DB service. -2. On the service overview page, scroll to **Service integrations** and select **Manage Integrations**. +2. Click **Integrations** on the sidebar. 3. Select **Monitor Data in Grafana**. -4. Choose either a new or existing service. +4. Choose either a new service or existing service. - When creating a new service you will need to select the cloud, region and plan to use. You should also give your service a name. The service overview page shows the nodes rebuilding, and then indicates when they are ready. - If you're already using Grafana on Aiven, you can integrate your M3DB as a data source for that existing Grafana. diff --git a/docs/products/m3db/howto/telegraf.rst b/docs/products/m3db/howto/telegraf.rst index 361a44ad5e..6a86630ce8 100644 --- a/docs/products/m3db/howto/telegraf.rst +++ b/docs/products/m3db/howto/telegraf.rst @@ -19,7 +19,9 @@ Variable Description Configuring Telegraf InfluxDB® output plugin for M3 --------------------------------------------------- -Below is an example of how to configure Telegraf to send metrics to M3 using the InfluxDB line-protocol. These lines belong in the **output plugins** section of your Telegraf configuration file:: +Below is an example of how to configure Telegraf to send metrics to M3 using the InfluxDB line-protocol. These lines belong in the **output plugins** section of your Telegraf configuration file: + +.. code:: # Configuration for sending metrics to M3 [[outputs.influxdb]] @@ -42,7 +44,9 @@ Below is an example of how to configure Telegraf to send metrics to M3 using the Configuring Telegraf Prometheus remote write for M3 --------------------------------------------------- -Here's an example of how to configure Telegraf to send metrics to M3 using the Prometheus remote write protocol. These lines go in the output plugins section of the Telegraf configuration file:: +Here's an example of how to configure Telegraf to send metrics to M3 using the Prometheus remote write protocol. These lines go in the output plugins section of the Telegraf configuration file: + +.. code:: # Configuration for sending metrics to M3 [outputs.http] diff --git a/docs/products/m3db/howto/write-go.rst b/docs/products/m3db/howto/write-go.rst index f2c72c29ab..45064bf04f 100644 --- a/docs/products/m3db/howto/write-go.rst +++ b/docs/products/m3db/howto/write-go.rst @@ -19,9 +19,11 @@ Pre-requisites For this example you will need: -1. The Prometheus client for Go:: +1. The Prometheus client for Go: - go get -u github.com/m3db/prometheus_remote_client_golang/promremote + .. 
code:: + + go get -u github.com/m3db/prometheus_remote_client_golang/promremote diff --git a/docs/products/m3db/reference/terminology.rst b/docs/products/m3db/reference/terminology.rst index 8fec77ae98..3c9159ca74 100644 --- a/docs/products/m3db/reference/terminology.rst +++ b/docs/products/m3db/reference/terminology.rst @@ -1,19 +1,8 @@ Terminology for M3 ================== -Time series - Unique series of data identified by a set of labels, containing value(s) for specific points in time - -Point - A value and timestamp pair. This item can also include labels to denote which series of data this point belongs to. - -Namespace - A collection of data points. Namespaces are either unaggregated (includes all points as they arrived) or aggregated (with only one data point stored for each duration configured for the namespace). - -Replication factor - M3 stores data redundantly (if replication factor > 1), and uses by default quorum writes and best-effort quorum reads. In Aiven for M3, replication factor is hardcoded at 3, which means that single availability zone's worth of nodes can be lost and database still stays functional. - -.. _Terminology Shard: - -Shard - M3 databases have their storage partitioned over multiple shards (Aiven for M3 services usually have 60 shards, although this can be changed by request). The M3 namespaces are spread across the shards and replicated based on their replication factor. Beware that changing the number of shards will cause all data to be lost, however the default value of 60 works well for most use cases. +- **Time series:** Unique series of data identified by a set of labels, containing value(s) for specific points in time +- **Point:** A value and timestamp pair. This item can also include labels to denote which series of data this point belongs to. +- **Namespace:** A collection of data points. Namespaces are either unaggregated (includes all points as they arrived) or aggregated (with only one data point stored for each duration configured for the namespace). +- **Replication factor:** M3 stores data redundantly (if replication factor > 1), and uses by default quorum writes and best-effort quorum reads. In Aiven for M3, replication factor is hardcoded at 3, which means that single availability zone's worth of nodes can be lost and database still stays functional. +- **Shard:** M3 databases have their storage partitioned over multiple shards (Aiven for M3 services usually have 60 shards, although this can be changed by request). The M3 namespaces are spread across the shards and replicated based on their replication factor. Beware that changing the number of shards will cause all data to be lost, however the default value of 60 works well for most use cases. diff --git a/docs/products/mysql/concepts/max-number-of-connections.rst b/docs/products/mysql/concepts/max-number-of-connections.rst index eb271da79b..1fb53a4372 100644 --- a/docs/products/mysql/concepts/max-number-of-connections.rst +++ b/docs/products/mysql/concepts/max-number-of-connections.rst @@ -4,33 +4,34 @@ MySQL ``max_connections`` ``max_connections`` vs RAM -------------------------- -The maximum number of simultaneous connections in Aiven for MySQL® depends on how much RAM your service plan offers and is fixed for each service plan (`Hobbyist`, `Startup`, `Business`, `Premium`, or any other plan available under a specific cloud provider.) 
+The maximum number of simultaneous connections in Aiven for MySQL® depends on how much RAM your service plan offers and is fixed for each service plan (**Hobbyist**, **Startup**, **Business**, **Premium**, or any other plan available under a specific cloud provider.) .. note:: - Independent of the plan, an ``extra_connection`` with a value of ``1`` is added for the system process. + + Independent of the plan, an ``extra_connection`` with a value of ``1`` is added for the system process. Plans under 4 GiB ----------------- For plans under 4 GiB of RAM, the number of allowed connections is |mysql_connections_per_<4G| per GiB: - :math:`{max\_connections} =` |mysql_connections_per_<4G| |mysql_max_connections| +:math:`{max\_connections} =` |mysql_connections_per_<4G| |mysql_max_connections| .. topic:: Example With 2 GiB of RAM, the maximum number of connections is - :math:`{max\_connections} =` |mysql_connections_per_<4G| x 2 + 1 + :math:`{max\_connections} =` |mysql_connections_per_<4G| x 2 + 1 Plans with 4 GiB or more ------------------------ For plans higher or equal to 4 GiB, the number of allowed connections is |mysql_connections_per_>4G| per GiB: - :math:`{max\_connections} =` |mysql_connections_per_>4G| |mysql_max_connections| +:math:`{max\_connections} =` |mysql_connections_per_>4G| |mysql_max_connections| .. topic:: Example With 7 GiB of RAM, the maximum number of connections is - :math:`{max\_connections} =` |mysql_connections_per_>4G| x 7 + 1 + :math:`{max\_connections} =` |mysql_connections_per_>4G| x 7 + 1 .. include:: /includes/platform-variables.rst \ No newline at end of file diff --git a/docs/products/mysql/concepts/mysql-memory-usage.rst b/docs/products/mysql/concepts/mysql-memory-usage.rst index b6031eb53e..59ab955304 100644 --- a/docs/products/mysql/concepts/mysql-memory-usage.rst +++ b/docs/products/mysql/concepts/mysql-memory-usage.rst @@ -1,5 +1,5 @@ -Understanding MySQL memory usage -================================== +Understand MySQL memory usage +============================= **MySQL memory utilization can appear high, even if the service is relatively idle.** diff --git a/docs/products/mysql/concepts/mysql-replication.rst b/docs/products/mysql/concepts/mysql-replication.rst index 368b189e9a..2b88f2127d 100644 --- a/docs/products/mysql/concepts/mysql-replication.rst +++ b/docs/products/mysql/concepts/mysql-replication.rst @@ -21,7 +21,7 @@ If a statement like the above matched 500 rows and the table had a million rows Replication use in Aiven for MySQL ---------------------------------- -The considerations presented on the :ref:`Replication Overview ` section are not only valid for services that actually have standby nodes or read-only replicas. Whenever the Aiven management platform needs to create a new node for a service, the node is first initialized from backup to most recent backed up state. This includes applying the full replication stream that has been created after the most recent full base backup. Once the latest backed +The considerations presented on the :ref:`Replication overview ` section are not only valid for services that actually have standby nodes or read-only replicas. Whenever the Aiven management platform needs to create a new node for a service, the node is first initialized from backup to most recent backed up state. This includes applying the full replication stream that has been created after the most recent full base backup. 
Once the latest backed up state has been restored the node will connect to the current master, if available, and replicate latest state from that, which is also affected by possible replication slowness. When new nodes are created, it needs to perform replication and having large tables without primary keys may make operations such as replacing failed nodes, upgrading service plan, migrating service to a different cloud provider or region, starting up new read-only replica service, forking a service, and some others to take extremely long time or depending on the situation practically not complete at all without manual operator intervention (e.g. new read-only replica might never be able to catch up with existing master because replication is too slow). diff --git a/docs/products/mysql/concepts/mysql-tuning-and-concurrency.rst b/docs/products/mysql/concepts/mysql-tuning-and-concurrency.rst index bcc106bc9a..d1ab89f1d4 100644 --- a/docs/products/mysql/concepts/mysql-tuning-and-concurrency.rst +++ b/docs/products/mysql/concepts/mysql-tuning-and-concurrency.rst @@ -12,9 +12,10 @@ There are several key calculations which are fundamental to tuning: - Thread buffers - Concurrency -.. Important:: - | **Query output is for reference only** - | Queries should be run per service for accuracy and re-evaluated periodically for change. +.. important:: + Query output is for reference only. + + Queries should be run per service for accuracy and re-evaluated periodically for change. Service memory @@ -22,9 +23,9 @@ Service memory The :doc:`service memory ` can be calculated as: - |service_memory| +|service_memory| -where the overhead is currently |vm_overhead| +where the overhead is currently |vm_overhead|. Global buffers @@ -82,10 +83,12 @@ Queries may use part or all of the allocation. | 17.9375 | +-------------------+ -.. Important:: - | **The actual amount of memory a query could use is technically unbounded.** - | Uncontrolled memory allocations and temporary table usage can adversely affect memory allocation. - | The data dictionary size is based on the number of tables, fields and indexes within the database. +.. important:: + + The actual amount of memory a query could use is technically unbounded. + + Uncontrolled memory allocations and temporary table usage can adversely affect memory allocation. + The data dictionary size is based on the number of tables, fields and indexes within the database. Concurrency @@ -104,19 +107,21 @@ The :doc:`max_connections ` parameter is based off th | 226 | +-------------------+ -.. Important:: - **This parameter should be used as a guideline only**. +.. important:: + + This parameter should be used as a guideline only. - By default, ``max_connections`` is configured for *optimistic* concurrency using all available memory. + By default, ``max_connections`` is configured for *optimistic* concurrency using all available memory. - In many instances, if the ``max connections`` are fully utilized, resource overcommitment and :doc:`/docs/platform/concepts/out-of-memory-conditions` will occur. + + In many instances, if the ``max connections`` are fully utilized, resource overcommitment and :doc:`/docs/platform/concepts/out-of-memory-conditions` will occur. At ~18 MB per connection, a 4 GiB service has a potential memory usage of 4068 MB (18 * 226). This is less than the service RAM, but exceeds the :doc:`service memory limit `. 
**For performance and stability, the following calculation is recommended:** - :math:`max\_concurrency =` |mysql_max_concurrency| +:math:`max\_concurrency =` |mysql_max_concurrency| This value may be pessimistic for a workload that does not require the full thread buffer, but is an advisable starting point for concurrency testing and monitoring. Concurrency can be incremented, if service memory permits. diff --git a/docs/products/mysql/get-started.rst b/docs/products/mysql/get-started.rst index 36823ba617..22ae3177c8 100644 --- a/docs/products/mysql/get-started.rst +++ b/docs/products/mysql/get-started.rst @@ -1,7 +1,7 @@ Getting started with Aiven for MySQL® ===================================== -Aiven for MySQL® services are managed from `Aiven Console `__ . +Aiven for MySQL® services are managed from `Aiven Console `__. Start a service --------------- @@ -14,7 +14,7 @@ Start a service This view shows the connection parameters for your MySQL service and its current status. -3. You can make changes to the service configuration in the **Overview** page, even while the service is being built. +3. On the **Overview** page, select **Service settings** from the sidebar to access the **Advanced configuration** section and make changes to the service configuration, even while the service is being built. You can find the available configuration options in the :doc:`reference article `. @@ -27,11 +27,13 @@ Next steps ---------- * Learn how to connect to MySQL: - - :doc:`From the command line ` - - :doc:`With MySQL workbench ` + + - :doc:`From the command line ` + - :doc:`With MySQL workbench ` * Create additional databases: - - :doc:`Create your database ` + + - :doc:`Create your database ` * Connect from your own :doc:`Python application `. diff --git a/docs/products/mysql/howto/connect-with-java.rst b/docs/products/mysql/howto/connect-with-java.rst index f56d9ad6bd..c215da3920 100644 --- a/docs/products/mysql/howto/connect-with-java.rst +++ b/docs/products/mysql/howto/connect-with-java.rst @@ -53,8 +53,10 @@ Run the code after replacement of the placeholders with values for your project: javac MySqlExample.java && java -cp mysql-driver-8.0.28.jar:. MySqlExample -host MYSQL_HOST -port MYSQL_PORT -database MYSQL_DATABASE -username avnadmin -password MYSQL_PASSWORD -If the script runs successfully, the output will be the values that were inserted into the table:: +If the script runs successfully, the output will be the values that were inserted into the table: - Version: 8.0.26 +.. code:: + + Version: 8.0.26 Now that your application is connected, you are all set to use Java with Aiven for MySQL. diff --git a/docs/products/mysql/howto/connect-with-php.rst b/docs/products/mysql/howto/connect-with-php.rst index 2bea1b8ed8..9e39157066 100644 --- a/docs/products/mysql/howto/connect-with-php.rst +++ b/docs/products/mysql/howto/connect-with-php.rst @@ -18,10 +18,17 @@ Pre-requisites -------------- * :doc:`/docs/platform/howto/download-ca-cert` from `Aiven Console `__ > the **Overview** page of your service. This example assumes it is in a local file called ``ca.pem``. +* Make sure you have read/write permissions to the `ca.pem` file and you add an absolute path to this file into :ref:`the code `: + + .. code-block:: bash + + $conn .= ";sslmode=verify-ca;sslrootcert='D:/absolute/path/to/ssl/certs/ca.pem'" .. note:: Your PHP installation needs to include the `MySQL functions `_ (most installations have this already). +.. 
_connect-mysql-php-code: + Code ---- diff --git a/docs/products/mysql/howto/connect-with-python.rst b/docs/products/mysql/howto/connect-with-python.rst index d9cdd40fee..2316cdfbb5 100644 --- a/docs/products/mysql/howto/connect-with-python.rst +++ b/docs/products/mysql/howto/connect-with-python.rst @@ -27,13 +27,17 @@ For this example you will need: * Python 3.7 or later -* The Python ``PyMySQL`` library. You can install this with ``pip``:: +* The Python ``PyMySQL`` library. You can install this with ``pip``: + + .. code:: + + pip install pymysql - pip install pymysql +* Install ``cryptography`` package: -* Install ``cryptography`` package:: - - pip install cryptography + .. code:: + + pip install cryptography Code '''' diff --git a/docs/products/mysql/howto/create-database.rst b/docs/products/mysql/howto/create-database.rst index 936df5d843..0654694859 100644 --- a/docs/products/mysql/howto/create-database.rst +++ b/docs/products/mysql/howto/create-database.rst @@ -15,4 +15,4 @@ To create a new MySQL® database, take the following steps: .. Tip:: - You can also use the :ref:`Aiven client ` or the :doc:`MySQL client` to create your database from the CLI. + You can also use the :ref:`Aiven client ` or the :doc:`MySQL client ` to create your database from the CLI. diff --git a/docs/products/mysql/howto/create-missing-primary-keys.rst b/docs/products/mysql/howto/create-missing-primary-keys.rst index a63d478b7b..c4a2799872 100644 --- a/docs/products/mysql/howto/create-missing-primary-keys.rst +++ b/docs/products/mysql/howto/create-missing-primary-keys.rst @@ -87,7 +87,7 @@ When executing the ``ALTER TABLE`` statement for a large table, you may encounte Creating index 'PRIMARY' required more than 'mysql.innodb_online_alter_log_max_size' bytes of modification log. Please try again. -For the operation to succeed, you need to set a value that is high enough. Depending on the table size, this could be a few gigabytes or even more for very large tables. You can change ``mysql.innodb_online_alter_log_max_size`` as follows: `Aiven Console `_ > your Aiven for MySQL service > the **Overview** page of your service > the **Advanced configuration** section > **Change** > **Add configuration option** > ``mysql.innodb_online_alter_log_max_size`` > set a value > **Save advanced configuration**. +For the operation to succeed, you need to set a value that is high enough. Depending on the table size, this could be a few gigabytes or even more for very large tables. You can change ``mysql.innodb_online_alter_log_max_size`` as follows: `Aiven Console `_ > your Aiven for MySQL service's page > the **Service settings** page of the service > the **Advanced configuration** section > **Configure** > **Add configuration options** > ``mysql.innodb_online_alter_log_max_size`` > set a value > **Save configuration**. .. seealso:: diff --git a/docs/products/mysql/howto/create-tables-without-primary-keys.rst b/docs/products/mysql/howto/create-tables-without-primary-keys.rst index 889584e9b7..4d30dab427 100644 --- a/docs/products/mysql/howto/create-tables-without-primary-keys.rst +++ b/docs/products/mysql/howto/create-tables-without-primary-keys.rst @@ -4,18 +4,22 @@ Create new tables without primary keys If your Aiven for MySQL® service was created after 2020-06-03, by default it does not allow creating new tables without primary keys. You can check this by taking the following steps: 1. Log in to `Aiven Console `_. -2. In the **Services** page, select your Aiven for MySQL service that you want to check. -3. 
In the **Overview** page of your service, scroll down to the **Advanced configuration** section. -4. Check the **Advanced configuration** section for the ``mysql.sql_require_primary_key`` parameter and its status. - -If ``mysql.sql_require_primary_key`` is enabled, your Aiven for MySQL does not allow you to create new tables without primary keys. Attempts to create tables without primary keys will result in the following error message:: - - Unable to create or change a table without a primary key, when the system variable 'sql_require_primary_key' is set. Add a primary key to the table or unset this variable to avoid this message. Note that tables without a primary key can cause performance problems in row-based replication, so please consult your DBA before changing this setting. - -If creating tables without primary keys is prevented and the table that you're trying to create is known to be small, you may override this setting and create the table anyway. - +2. On the **Services** page, select your Aiven for MySQL service that you want to check. +3. On your service's page, select **Service settings** from the sidebar. +4. On the **Service settings** page of your service, scroll down to the **Advanced configuration** section. +5. Check the **Advanced configuration** section for the ``mysql.sql_require_primary_key`` parameter and its status. + + If ``mysql.sql_require_primary_key`` is enabled, your Aiven for MySQL does not allow you to create new tables without primary keys. Attempts to create tables without primary keys will result in the following error message: + + .. code:: + + Unable to create or change a table without a primary key, when the system variable 'sql_require_primary_key' is set. Add a primary key to the table or unset this variable to avoid this message. Note that tables without a primary key can cause performance problems in row-based replication, so please consult your DBA before changing this setting. + + If creating tables without primary keys is prevented and the table that you're trying to create is known to be small, you may override this setting and create the table anyway. + .. seealso:: - You can read more about the MySQL replication in the :ref:`Replication overview ` article. + + You can read more about the MySQL replication in the :ref:`Replication overview ` article. You have two options to create the tables: @@ -28,13 +32,14 @@ You have two options to create the tables: * Disabling ``mysql.sql_require_primary_key`` parameter. To disable the ``mysql.sql_require_primary_key`` parameter, take the following steps: 1. Log in to `Aiven Console `_. -2. In the **Services** page, select your Aiven for MySQL service that you want to check. -3. In the **Overview** page of your service, scroll down to the **Advanced configuration** section and select **Change**. -4. In the **Edit advanced configuration** window, find ``mysql.sql_require_primary_key`` and disable it by using the toggle switch. Select **Save advanced configuration**. +2. On the **Services** page, select your Aiven for MySQL service that you want to check. +3. On your service's page, select **Service settings** from the sidebar. +4. On the **Service settings** page of your service, scroll down to the **Advanced configuration** section and select **Configure**. +5. In the **Advanced configuration** window, find ``mysql.sql_require_primary_key`` and disable it by using the toggle switch. Select **Save configuration**. - .. warning:: + .. 
warning:: - It is only recommended to use this approach when the table is created by an external application and using the session variable is not an option. To prevent more problematic tables from being unexpectedly created in the future you should enable the setting again once you finished creating the tables without primary keys. + It is only recommended to use this approach when the table is created by an external application and using the session variable is not an option. To prevent more problematic tables from being unexpectedly created in the future you should enable the setting again once you finished creating the tables without primary keys. .. seealso:: diff --git a/docs/products/mysql/howto/disable-foreign-key-checks.rst b/docs/products/mysql/howto/disable-foreign-key-checks.rst index 3e6fe5b1b9..03b3fb3d8c 100644 --- a/docs/products/mysql/howto/disable-foreign-key-checks.rst +++ b/docs/products/mysql/howto/disable-foreign-key-checks.rst @@ -1,9 +1,11 @@ Disable foreign key checks ========================== -All Aiven for MySQL® services have foreign key checks enabled by default helping in keeping referential integrity across tables. However, you might want to disable it for a particular session. For example, when migrating to an Aiven for MySQL you may face errors related to foreign key violations similar to:: +All Aiven for MySQL® services have foreign key checks enabled by default helping in keeping referential integrity across tables. However, you might want to disable it for a particular session. For example, when migrating to an Aiven for MySQL you may face errors related to foreign key violations similar to: - ERROR 3780 (HY000) at line 11596: Referencing column 'g_id' and referenced column 'g_id' in foreign key constraint 'FK_33b11dcfac6148578da087b07c2f388f' are incompatible. +.. code:: + + ERROR 3780 (HY000) at line 11596: Referencing column 'g_id' and referenced column 'g_id' in foreign key constraint 'FK_33b11dcfac6148578da087b07c2f388f' are incompatible. The following explains how to temporarily disable Aiven for MySQL foreign key checking for the duration of a session. diff --git a/docs/products/mysql/howto/do-check-service-migration.rst b/docs/products/mysql/howto/do-check-service-migration.rst index 526d724ef2..08c0552987 100644 --- a/docs/products/mysql/howto/do-check-service-migration.rst +++ b/docs/products/mysql/howto/do-check-service-migration.rst @@ -37,7 +37,7 @@ You can create the task of migration, for example, from a MySQL DB to an Aiven s .. code-block:: shell - avn service task-create --operation migration_check --source-service-uri mysql://user:password@host:port/databasename --project MY_PROJECT_NAME mysql + avn service task-create --project PROJECT_NAME --operation migration_check --source-service-uri mysql://user:password@host:port/databasename --project MY_PROJECT_NAME mysql You can see the information about the task including the ID. @@ -54,9 +54,11 @@ You can see the information about the task including the ID. **Step 2: retrieve your task's status.** -You can check the status of your task by running:: +You can check the status of your task by running: - avn service task-get --task-id e2df7736-66c5-4696-b6c9-d33a0fc4cbed --project MY_PROJECT_NAME mysql +.. code:: + + avn service task-get --project PROJECT_NAME --task-id e2df7736-66c5-4696-b6c9-d33a0fc4cbed --project MY_PROJECT_NAME mysql You can find whether the operation succeeds and more relevant information about the migration. 
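The two ``avn`` calls above can also be scripted when you want to wait for the ``migration_check`` task to finish instead of re-running ``task-get`` by hand. The following is a minimal sketch that shells out to the documented command; it assumes the Aiven CLI's ``--json`` output flag and a ``success`` field in the task payload, so verify both against your CLI version before relying on it.

.. code-block:: python

    import json
    import subprocess
    import time

    PROJECT = "PROJECT_NAME"   # placeholders: use your own project, service and task ID
    SERVICE = "mysql"
    TASK_ID = "e2df7736-66c5-4696-b6c9-d33a0fc4cbed"

    while True:
        # Re-run the documented task-get command and ask for machine-readable output.
        result = subprocess.run(
            ["avn", "service", "task-get", "--json",
             "--project", PROJECT, "--task-id", TASK_ID, SERVICE],
            capture_output=True, text=True, check=True,
        )
        task = json.loads(result.stdout)
        print(json.dumps(task, indent=2))

        # Assumed field name: stop polling once the task reports a final result.
        if task.get("success") is not None:
            break
        time.sleep(10)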
diff --git a/docs/products/mysql/howto/enable-slow-queries.rst b/docs/products/mysql/howto/enable-slow-queries.rst index 3c6ac28965..66f2e62ee0 100644 --- a/docs/products/mysql/howto/enable-slow-queries.rst +++ b/docs/products/mysql/howto/enable-slow-queries.rst @@ -22,12 +22,12 @@ Follow these steps to enable your slow queries in your Aiven for MySQL service v 1. Log in to `Aiven Console `_. 2. In the **Services** page, select your Aiven for MySQL service. -3. In the **Overview** page of your service, scroll down to the **Advanced configuration** section and select **Change**. -4. In the **Edit advanced configuration** window +3. In the **Service settings** page of your service, scroll down to the **Advanced configuration** section and select **Configure**. +4. In the **Advanced configuration** window - 1. Select **Add configuration option**. From the unfolded list, choose ``mysql.slow_query_log``. Enable ``mysql.slow_query_log`` by toggling it to ``On``. By default, ``mysql.slow_query_log`` is disabled. - 2. Select **Add configuration option**. From the unfolded list, choose ``mysql.long_query_time``. Set ``mysql.long_query_time`` according to your specific need. - 3. Select **Save advanced configuration**. + 1. Select **Add configuration options**. From the unfolded list, choose ``mysql.slow_query_log``. Enable ``mysql.slow_query_log`` by toggling it to ``On``. By default, ``mysql.slow_query_log`` is disabled. + 2. Select **Add configuration options**. From the unfolded list, choose ``mysql.long_query_time``. Set ``mysql.long_query_time`` according to your specific need. + 3. Select **Save configuration**. Your Aiven for MySQL service can now log slow queries. If you want to simulate slow queries to check this feature, check the next section. @@ -72,5 +72,3 @@ You can expect to receive an output similar to the following: .. warning:: Disabling the ``mysql.slow_query_log`` setting truncates the ``mysql.slow_query_log`` table. Make sure to back up the data from the ``mysql.slow_query_log`` table in case you need it for further analysis. - - diff --git a/docs/products/mysql/howto/migrate-db-to-aiven-via-console.rst b/docs/products/mysql/howto/migrate-db-to-aiven-via-console.rst index 399a9e785f..9b1feb886a 100644 --- a/docs/products/mysql/howto/migrate-db-to-aiven-via-console.rst +++ b/docs/products/mysql/howto/migrate-db-to-aiven-via-console.rst @@ -16,15 +16,14 @@ The console migration tool enables you to migrate MySQL databases to managed MyS * Cloud-hosted MySQL databases * Managed MySQL database clusters on Aiven. -With the console migration tool, you can migrate your data using either the :ref:`continuous migration method ` (default and recommended) or the :ref:`one-time snapshot method ` (``mysqldump``). +The console migration tool provides 2 migration methods: -.. _continuous-migration: +- **(Recommended) Continuous migration:** Used by default in the tool and taken as a method to follow in this guide. This method uses logical replication so that data transfer is possible not only for existing data in the source database when triggering the migration but also for any data written to the source database during the migration. -* Recommended continuous migration method is used by default in the tool and taken as a method to follow in this guide. 
This method uses logical replication so that data transfer is possible not only for the data that has already been there in the source database when triggering the migration but also for any data written to the source database during the migration. +- **mysqldump**: Exports the current contents of the source database into a text file and imports it to the target database. + Any changes written to the source database during the migration are **not transferred**. -.. _mysqldump-migration: - -* ``mysqldump`` exports current contents of the source database into a text file and imports it to the target database. Any changes written to the source database during the migration are not transferred. When you trigger the migration setup in the console and initial checks detect that your source database does not support the logical replication, you are notified about it via wizard. To continue with the migration, you can select the alternative ``mysqldump`` migration method in the wizard. + When you trigger the migration setup in the console and initial checks detect that your source database does not support the logical replication, you are notified about it via the migration wizard. To continue with the migration, you can select the alternative ``mysqldump`` migration method in the wizard. Prerequisites ------------- @@ -115,7 +114,7 @@ Pre-configure the source .. code-block:: bash - GRANT ALL ON .* TO ‘username'@‘%'; + GRANT ALL ON DATABASE_NAME.* TO USERNAME_CONNECTING_TO_SOURCE_DB; Reload the grant tables to apply the changes to the permissions. @@ -130,15 +129,16 @@ Pre-configure the source Migrate a database ------------------ -1. Log in to `Aiven Console `_. -2. From the **Services** list, select the service where your target database is located. -3. In the **Overview** page of the selected service, navigate to the **Migrate database** section and select **Set up migration**. -4. Guided by the **MySQL migration configuration guide** wizard, go through all the migration steps. +1. Log in to the `Aiven Console `_. +2. On the **Services** page, select the service where your target database is located. +3. From the sidebar on your service's page, select **Service settings**. +4. On the **Service settings** page, navigate to the **Service management** section, and select **Import database**. +5. Guided by the migration wizard, go through all the migration steps. Step 1 - configure '''''''''''''''''' -Make sure your configuration is in line with **Guidelines for successful database migration** provided in the migration wizard and select **Get started**. +Get familiar **Guidelines for successful database migration** provided in the **MySQL migration configuration guide** window, make sure your configuration is in line with them, and select **Get started**. Step 2 - validation ''''''''''''''''''' @@ -163,12 +163,12 @@ Step 3 - migration If all the checks pass with no error messages, you can trigger the migration by selecting **Start migration**. +.. _stop-migration-mysql: + Step 4 - replicating '''''''''''''''''''' -.. _stop-migration-mysql: - -While the migration is in progress, you can +While the migration is in progress, you can: * Let it proceed until completed by selecting **Close window**, which closes the wizard. You come back to check the status at any time. * Discontinue the migration by selecting **Stop migration**, which retains the data already migrated. For information on how to follow up on a stopped migration process, see :ref:`Start over `. 
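If you prefer to script the source-side preparation instead of pasting the ``GRANT`` statement into a MySQL shell, the sketch below runs the equivalent statements with ``PyMySQL`` (the client library used elsewhere in these docs). All host names, user names, passwords and database names are illustrative placeholders; the privileges themselves follow the pre-configuration step shown above.

.. code-block:: python

    import pymysql

    # Connect to the *source* server as an administrative user (placeholder values).
    conn = pymysql.connect(
        host="source-mysql.example.com",
        port=3306,
        user="admin",
        password="ADMIN_PASSWORD",
    )

    try:
        with conn.cursor() as cur:
            # Create the user the migration tool will connect with (illustrative names).
            cur.execute(
                "CREATE USER IF NOT EXISTS 'migration_user'@'%' IDENTIFIED BY 'STRONG_PASSWORD'"
            )
            cur.execute("GRANT ALL ON defaultdb.* TO 'migration_user'@'%'")
            # "Reload the grant tables" from the step above.
            cur.execute("FLUSH PRIVILEGES")
        conn.commit()
    finally:
        conn.close()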
@@ -214,7 +214,7 @@ If you :ref:`stop a migration process `, you cannot restar If you start a new migration using the same connection details when your *target* database is not empty, the migration tool truncates your *target* database and an existing data set gets overwritten with the new data set. -Related reading +Related pages --------------- - :doc:`Migrate to Aiven for MySQL from an external MySQL ` diff --git a/docs/products/mysql/howto/migrate-from-external-mysql.rst b/docs/products/mysql/howto/migrate-from-external-mysql.rst index 83c6941bf3..14dfdc9c49 100644 --- a/docs/products/mysql/howto/migrate-from-external-mysql.rst +++ b/docs/products/mysql/howto/migrate-from-external-mysql.rst @@ -10,9 +10,11 @@ To perform a migration from an external MySQL to Aiven for MySQL the following r * The source server needs to be publicly available or accessible via a virtual private cloud (VPC) peering connection between the private networks, and any firewalls need to be open to allow traffic between the source and target servers. * You have a user account on the source server with sufficient privileges to create a user for the replication process. -* `GTID `_ is enabled on the source database. To review the current GTID setting, run the following command on the source cluster:: +* `GTID `_ is enabled on the source database. To review the current GTID setting, run the following command on the source cluster: - show global variables like 'gtid_mode'; +.. code:: + + show global variables like 'gtid_mode'; .. Note:: If you are migrating from MySQL in GCP, you need to enable backups with `PITR `_ for GTID to be set to ``on`` @@ -61,7 +63,7 @@ Perform the migration 3. Set the migration details via the ``avn service update`` :ref:`Aiven CLI command ` substituting the parameters accordingly:: - avn service update \ + avn service update --project PROJECT_NAME \ -c migration.host=SRC_HOSTNAME \ -c migration.port=SRC_PORT \ -c migration.username=SRC_USERNAME \ @@ -74,7 +76,7 @@ Perform the migration .. code:: - avn --show-http service migration-status DEST_NAME + avn --show-http service migration-status --project PROJECT_NAME DEST_NAME Whilst the migration process is ongoing, the ``migration_detail.status`` will be ``syncing``: @@ -110,5 +112,5 @@ If you reach a point where you no longer need the ongoing replication to happen, .. code:: - avn service update --remove-option migration DEST_NAME + avn service update --project PROJECT_NAME --remove-option migration DEST_NAME diff --git a/docs/products/mysql/howto/reclaim-disk-space.rst b/docs/products/mysql/howto/reclaim-disk-space.rst index 57a43c6ee5..5800acff65 100644 --- a/docs/products/mysql/howto/reclaim-disk-space.rst +++ b/docs/products/mysql/howto/reclaim-disk-space.rst @@ -7,9 +7,11 @@ You can configure InnoDB to release disk space back to the operating system by r `Under certain conditions `_ (for example, including the presence of a ``FULLTEXT`` index), command ``OPTIMIZE TABLE`` `copies `_ the data to a new table containing just the current data, and then drops and renames the new table to match the old one. During this process, data modification is blocked. This requires enough free space to store two copies of the current data at once. 
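Because ``OPTIMIZE TABLE`` temporarily needs room for a second copy of the table, it can help to check the table's current on-disk footprint before running it. Below is a minimal sketch using ``PyMySQL`` with placeholder connection details; the reported size is an estimate taken from ``information_schema``, not an exact guarantee.

.. code-block:: python

    import pymysql

    # Placeholder connection details for your Aiven for MySQL service.
    conn = pymysql.connect(
        host="MYSQL_HOST", port=3306, user="avnadmin",
        password="MYSQL_PASSWORD", database="defaultdb",
    )

    cur = conn.cursor()
    # data_length + index_length is the footprint InnoDB reports for the table;
    # OPTIMIZE TABLE needs roughly this much free space again while it rebuilds.
    cur.execute(
        """
        SELECT table_name,
               ROUND((data_length + index_length) / 1024 / 1024, 1) AS size_mb
        FROM information_schema.tables
        WHERE table_schema = %s AND table_name = %s
        """,
        ("defaultdb", "mytable"),
    )
    print(cur.fetchone())
    conn.close()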
-To ensure that the space is also reclaimed on standby nodes, run the command as below without any additional modifiers like ``NO_WRITE_TO_BINLOG`` or ``LOCAL``:: +To ensure that the space is also reclaimed on standby nodes, run the command as below without any additional modifiers like ``NO_WRITE_TO_BINLOG`` or ``LOCAL``: - ``OPTIMIZE TABLE defaultdb.mytable;`` +.. code:: + + ``OPTIMIZE TABLE defaultdb.mytable;`` If you do not have enough free space to run the ``OPTIMIZE TABLE`` command, you can: diff --git a/docs/products/mysql/overview.rst b/docs/products/mysql/overview.rst index d3bcf53b1b..fd84e6fce2 100644 --- a/docs/products/mysql/overview.rst +++ b/docs/products/mysql/overview.rst @@ -1,15 +1,9 @@ Aiven for MySQL® overview ========================= -What is Aiven for MySQL? ------------------------- - Aiven for MySQL® is a full managed relational database service, deployable in the cloud of your choice. It is available at a size to suit your needs, from single-node starter plans to highly-available production platforms. MySQL has been a key part of the open source database landscape for a long time and it's a popular and reliable database platform. Aiven takes care of the management side and provides a MySQL that you can use. -Why MySQL? ----------- - -MySQL is a traditional open source relational database that plays nicely with many well-established applications. Whether you are building something new or looking for a modern platform for your existing applications, Aiven for MySQL is a friendly and scalable offering of one of the best open source databases around. +MySQL is a traditional open source relational database that interacts with many well-established applications. Whether you are building something new or looking for a modern platform for your existing applications, Aiven for MySQL is a friendly and scalable offering of one of the best open source databases around. Get started with Aiven for MySQL -------------------------------- diff --git a/docs/products/opensearch.rst b/docs/products/opensearch.rst index d26522ba59..e9d0d9b2e7 100644 --- a/docs/products/opensearch.rst +++ b/docs/products/opensearch.rst @@ -8,7 +8,7 @@ Aiven for OpenSearch® is a fully managed distributed search and analytics suite .. grid:: 1 2 2 2 - .. grid-item-card:: :doc:`Quickstart ` + .. grid-item-card:: :doc:`Quickstart ` :shadow: md :margin: 2 2 0 0 diff --git a/docs/products/opensearch/concepts/access_control.rst b/docs/products/opensearch/concepts/access_control.rst index 9ef29080c6..ee943efa0d 100644 --- a/docs/products/opensearch/concepts/access_control.rst +++ b/docs/products/opensearch/concepts/access_control.rst @@ -13,7 +13,7 @@ With access control enabled, you can customize the access control lists for each For more information about access control, patterns and permissions, see :doc:`Understanding access control in Aiven for OpenSearch® `. -Method 2: Enable OpenSearch® Security management |beta| +Method 2: Enable OpenSearch® Security management -------------------------------------------------------- Another way to manage user accounts, access control, roles, and permissions for your Aiven for OpenSearch® service is by :doc:`enabling OpenSearch® Security management `. This method lets you use the OpenSearch Dashboard and OpenSearch API to manage all aspects of your service's security. 
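Once OpenSearch® Security management is enabled, user management moves to the OpenSearch Security API. As an illustration only, the sketch below creates an internal user over HTTPS with the ``requests`` library; the service URI, user name, password and backend role are placeholders, and it assumes the standard ``_plugins/_security`` API path exposed by OpenSearch Security.

.. code-block:: python

    import requests

    # Placeholder service URI including the OpenSearch Security admin credentials.
    SERVICE_URI = "https://SECURITY_ADMIN:PASSWORD@SERVICE_NAME-PROJECT.aivencloud.com:PORT"

    # PUT _plugins/_security/api/internalusers/<name> creates or replaces an internal user.
    resp = requests.put(
        f"{SERVICE_URI}/_plugins/_security/api/internalusers/app_reader",
        json={
            "password": "A_STRONG_PASSWORD",
            "backend_roles": ["readall"],   # illustrative role; map it to your own roles
        },
    )
    resp.raise_for_status()
    print(resp.json())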
diff --git a/docs/products/opensearch/concepts/opensearch-vs-elasticsearch.rst b/docs/products/opensearch/concepts/opensearch-vs-elasticsearch.rst index 22a6f00bd6..cbcbf5874f 100644 --- a/docs/products/opensearch/concepts/opensearch-vs-elasticsearch.rst +++ b/docs/products/opensearch/concepts/opensearch-vs-elasticsearch.rst @@ -5,7 +5,7 @@ OpenSearch® is the open source continuation of the original Elasticsearch proje Version 1.0 release of OpenSearch should be very similar to the Elasticsearch release that it is based on, and Aiven encourages all customers to upgrade at their earliest convenience. This is to ensure that your platforms can continue to receive upgrades in the future. -To start exploring Aiven for OpenSearch®, check out the :doc:`Get Started with Aiven for OpenSearch® `. +To start exploring Aiven for OpenSearch®, check out the :doc:`Get Started with Aiven for OpenSearch® `. ----- diff --git a/docs/products/opensearch/concepts/os-security.rst b/docs/products/opensearch/concepts/os-security.rst index fb21453d2b..ec61d8adda 100644 --- a/docs/products/opensearch/concepts/os-security.rst +++ b/docs/products/opensearch/concepts/os-security.rst @@ -6,12 +6,14 @@ OpenSearch Security is a powerful feature that enhances the security of your Ope With OpenSearch Security enabled, you can manage user access and permissions directly from the :doc:`OpenSearch Dashboard `, giving you full control over your service's security. .. warning:: - * Once you have enabled OpenSearch Security management, you can no longer use `Aiven Console `_, `Aiven API `_, :doc:`Aiven CLI `, :doc:`Aiven Terraform provider ` or :doc:`Aiven Operator for Kubernetes® ` to manage access controls. - * You must use the OpenSearch Security Dashboard or OpenSearch Security API for managing user authentication and access control after enabling OpenSearch Security management. - * Once enabled, OpenSearch Security management cannot be disabled. If you need assistance disabling OpenSearch Security management, contact `Aiven support `_. + + * Once you have enabled OpenSearch Security management, you can no longer use `Aiven Console `_, `Aiven API `_, :doc:`Aiven CLI `, :doc:`Aiven Terraform provider ` or :doc:`Aiven Operator for Kubernetes® ` to manage access controls. + * You must use the OpenSearch Security Dashboard or OpenSearch Security API for managing user authentication and access control after enabling OpenSearch Security management. + * Once enabled, OpenSearch Security management cannot be disabled. If you need assistance disabling OpenSearch Security management, contact `Aiven support `_. .. note:: - * To implement basic and simplified access control, you can use :doc:`Aiven's Access Control Lists (ACL) ` to manage user roles and permissions. + + To implement basic and simplified access control, you can use :doc:`Aiven's Access Control Lists (ACL) ` to manage user roles and permissions. OpenSearch Security use cases -------------------------------- diff --git a/docs/products/opensearch/concepts/service-overview.rst b/docs/products/opensearch/concepts/service-overview.rst index add5fa9e2e..22d5d15466 100644 --- a/docs/products/opensearch/concepts/service-overview.rst +++ b/docs/products/opensearch/concepts/service-overview.rst @@ -3,7 +3,6 @@ Aiven for OpenSearch® overview Aiven for OpenSearch® is a fully managed distributed search and analytics suite, deployable in the cloud of your choice. Ideal for logs management, application and website search, analytical aggregations and more. 
OpenSearch is an open source fork derived from Elasticsearch. - `OpenSearch® `_ is an open-source search and analytics suite including a search engine, NoSQL document database, and visualization interface. OpenSearch offers a distributed, full-text search engine based on `Apache Lucene® `_ with a RESTful API interface and support for JSON documents. Aiven for OpenSearch and Aiven for OpenSearch Dashboards are available on a cloud of your choice. .. note:: @@ -11,9 +10,6 @@ Aiven for OpenSearch® is a fully managed distributed search and analytics suite Aiven for OpenSearch includes OpenSearch Dashboards, giving a fully-featured user interface and visualization platform for your data. -Why OpenSearch? ---------------- - OpenSearch is designed to be robust and scalable, capable of handling various data types and structures. It provides high-performance search functionality for data of any size or type, and with schemaless storage, it can index various sources with different data structures. OpenSearch is widely used for log ingestion and analysis, mainly because it can handle large data volumes, and OpenSearch Dashboards provide a powerful interface to the data, including search, aggregation, and analysis functionality. diff --git a/docs/products/opensearch/dashboards.rst b/docs/products/opensearch/dashboards.rst index 6d2d855725..7c2ad9eff6 100644 --- a/docs/products/opensearch/dashboards.rst +++ b/docs/products/opensearch/dashboards.rst @@ -9,7 +9,7 @@ OpenSearch® Dashboards is both a visualisation tool for data in the cluster and Get started with Aiven for OpenSearch Dashboards ------------------------------------------------ -Take your first steps with Aiven for OpenSearch Dashboards by following our :doc:`/docs/products/opensearch/dashboards/getting-started` article. +Take your first steps with Aiven for OpenSearch Dashboards by following our :doc:`/docs/products/opensearch/dashboards/get-started` article. .. note:: Starting with Aiven for OpenSearch® versions 1.3.13 and 2.10, OpenSearch Dashboards will remain available during a maintenance update that also consists of version updates to your Aiven for OpenSearch service. diff --git a/docs/products/opensearch/dashboards/getting-started.rst b/docs/products/opensearch/dashboards/get-started.rst similarity index 90% rename from docs/products/opensearch/dashboards/getting-started.rst rename to docs/products/opensearch/dashboards/get-started.rst index a9b542656d..77fc0c6612 100644 --- a/docs/products/opensearch/dashboards/getting-started.rst +++ b/docs/products/opensearch/dashboards/get-started.rst @@ -1,7 +1,7 @@ Getting started =============== -To start using **Aiven for OpenSearch® Dashboards**, :doc:`create Aiven for OpenSearch® service first` and OpenSearch Dashboards service will be added alongside it. Once the Aiven for OpenSearch service is running you can find connection information to your OpenSearch Dashboards in the service overview page and use your favourite browser to access OpenSearch Dashboards service. +To start using **Aiven for OpenSearch® Dashboards**, :doc:`create Aiven for OpenSearch® service first` and OpenSearch Dashboards service will be added alongside it. Once the Aiven for OpenSearch service is running you can find connection information to your OpenSearch Dashboards in the service overview page and use your favourite browser to access OpenSearch Dashboards service. .. 
note:: diff --git a/docs/products/opensearch/dashboards/howto/opensearch-alerting-dashboard.rst b/docs/products/opensearch/dashboards/howto/opensearch-alerting-dashboard.rst index bb84eddf94..385318184f 100644 --- a/docs/products/opensearch/dashboards/howto/opensearch-alerting-dashboard.rst +++ b/docs/products/opensearch/dashboards/howto/opensearch-alerting-dashboard.rst @@ -101,7 +101,7 @@ Monitor is a job that runs on a defined schedule and queries OpenSearch indices. Under **Run every** select ``1`` ``Minutes`` .. note:: - Schedule Frequency can be `By internal`, `Daily`, `Weekly`, `Monthly`, `Custom CRON expression` + Schedule Frequency can be ``By internal``, ``Daily`` ``Weekly`` ``Monthly``, ``Custom CRON expression``. 1. Fill in the fields under **Data source** diff --git a/docs/products/opensearch/getting-started.rst b/docs/products/opensearch/get-started.rst similarity index 100% rename from docs/products/opensearch/getting-started.rst rename to docs/products/opensearch/get-started.rst diff --git a/docs/products/opensearch/howto/audit-logs.rst b/docs/products/opensearch/howto/audit-logs.rst index e02c95a004..a036d2e474 100644 --- a/docs/products/opensearch/howto/audit-logs.rst +++ b/docs/products/opensearch/howto/audit-logs.rst @@ -1,4 +1,4 @@ -Enable, configure, and visualize OpenSearch® Audit logs +Enable and manage OpenSearch® Audit logs =============================================================== Aiven for OpenSearch® enables audit logging functionality via the OpenSearch Security dashboard, which allows OpenSearch Security administrators to track system events, security-related events, and user activity. These audit logs contain information about user actions, such as login attempts, API calls, index operations, and other security-related events. @@ -10,9 +10,23 @@ Prerequisites * Aiven for OpenSearch® service * :doc:`OpenSearch Security management enabled ` for the Aiven for OpenSearch service +After enabling audit logs in the service's advanced configuration, proceed to enable them in the OpenSearch® Security dashboard. -Enable audit logs ---------------------- +Enabling audit logs in Aiven for OpenSearch +---------------------------------------------- +By default, audit logs are disabled in Aiven for OpenSearch. To enable them: + +1. Access your Aiven for OpenSearch service in the Aiven Console. +2. From the left sidebar, click **Service settings** +3. Scroll to the **Advanced configuration** and click **Configure**. +4. In the **Advanced configuration** dialog, click **Add configuration to options**. +5. Use the search function to locate the ``enable_security_audit`` configuration and switch it to the **Enabled** position. +6. Click **Save configuration** to save your changes and enable audit logging. + +After enabling audit logs in the service's advanced configuration, proceed to enable them in the OpenSearch® Security dashboard. + +Enable audit logs in OpenSearch® Security dashboard +----------------------------------------------------- To enable audit logs in OpenSearch® Security dashboard, follow these steps: 1. Log in to the OpenSearch® Dashboard using OpenSearch® Security admin credentials. @@ -41,36 +55,25 @@ The following are the types of audit events recorded by OpenSearch: Configure audit logging ------------------------ -The audit logging settings in OpenSearch® Security can be tailored to meet your organization's requirements by customizing the *General* and *Compliance* settings sections. 
The following are the available settings for each section: +Customize the audit logging settings in OpenSearch® Security to align with your organization's specific requirements. The configuration process involves two primary sections: General and Compliance settings, each offering distinct options: -General settings -``````````````````` -* **Layer settings**: This section allows you to enable or disable logging for the REST and Transport layers. You can also exclude specific categories of events from being logged, such as events related to user authentication, to reduce noise in the logs. -* **Attribute settings**: This section allows customization of log data for each event, including options to log the request body, resolved indices, and sensitive headers. Additionally, there is an option to enable or disable logging for bulk requests. -* **Ignore settings**: This section allows you to exclude specific users or requests from being logged. This is useful for excluding internal users or automated processes that generate a lot of noise in the logs. +* **General settings:** Adjust logging for REST and Transport layers, tailor log data for each event, and set preferences to selectively exclude specific users or requests, ensuring log relevance and operational efficiency. +* **Compliance settings:** Enable compliance mode to meet regulatory standards and activate tamper-evident logging. You can also enable logging for internal and external configuration changes as well as metadata logging options for a robust security posture. -Compliance settings -````````````````````` -* **Compliance mode**: This enables logging of all events in a tamper-evident manner, which prevents deletion or modification of logs, ensuring compliance with specific regulations or standards. -* **Config**: This enables logging of changes to OpenSearch Security configuration files, allowing you to monitor security policies and settings changes. -* **Internal config logging**: This enables logging of events on the internal security index, allowing you to monitor changes to the OpenSearch Security configuration made by internal users or processes. -* **External config logging**: This enables logging of external configuration changes, allowing you to monitor changes to external authentication providers or other systems integrated with OpenSearch Security. -* **Read metadata and write metadata options**: This enables metadata logging for read and write operations. You can also exclude specific users or watched fields from being logged. - -..note: +.. note:: * You cannot modify the name of the audit log index for your Aiven for OpenSearch service as it is set to the default name ``auditlog-YYYY.MM.dd``. * You cannot change the size of the thread pool using ``OpenSearch.yml``. Optimize audit log configuration ````````````````````````````````` -Optimizing the configuration of audit logs in OpenSearch Security can ensure that your OpenSearch Security system is logging only the necessary information and protecting sensitive data from unauthorized access. +Optimize audit log configuration in OpenSearch Security to protect sensitive data from unauthorized access. + +* Exclude irrelevant event categories. +* Consider disabling logging for Rest and Transport layers. +* Disable request body logging for sensitive information. +* Regularly review and maintain audit log configuration. 
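If you manage audit settings programmatically rather than through the dashboard, the OpenSearch Security API also exposes the audit configuration. The sketch below is illustrative only: it assumes the ``_plugins/_security/api/audit`` endpoints and the key names shown in the comments, so check them against the response your cluster actually returns before applying any change.

.. code-block:: python

    import requests

    # Placeholder service URI including the OpenSearch Security admin credentials.
    SERVICE_URI = "https://SECURITY_ADMIN:PASSWORD@SERVICE_NAME-PROJECT.aivencloud.com:PORT"

    # Read the current audit configuration.
    current = requests.get(f"{SERVICE_URI}/_plugins/_security/api/audit").json()
    print(current)

    # Example tweak following the recommendations above: stop logging request bodies.
    # The "config" -> "audit" -> "log_request_body" keys are assumed; confirm them in
    # the response printed above before sending the update.
    config = current["config"]
    config["audit"]["log_request_body"] = False

    resp = requests.put(f"{SERVICE_URI}/_plugins/_security/api/audit/config", json=config)
    resp.raise_for_status()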
-* **Exclude categories**: To make it easier to identify important events and reduce the amount of data stored, consider excluding categories of events irrelevant to your security policies. For example, you may not need to log every successful login event, but may want to log all failed login attempts. -* **Disable Rest and transport layers**: By disabling logging for Rest and Transport layers, you can prevent sensitive information such as passwords and usernames from being logged. These layers are used to communicate with OpenSearch cluster nodes and may contain sensitive data. -* **Disable request body logging**: To prevent the logging of sensitive information such as credit card numbers and personal information, consider disabling request body logging. This can help prevent unauthorized access to sensitive data. -* **Additional configuration options**: Depending on your specific security requirements and policies, you may want to configure additional options such as disabling logging of all affected index names from an alias or wildcard, configuring bulk request handling, excluding specific requests or users from logs, configuring the audit log index name, and tuning the thread pool. -* **Regular review and maintenance**: It is essential to periodically review and maintain your audit log configuration to ensure that it is up-to-date with your security policies and requirements. This can help you identify potential security threats and take action to prevent them. Visualize audit log -------------------- @@ -78,29 +81,13 @@ Visualizing audit logs is an effective way to understand the extensive data gene To access and visualize audit logs in OpenSearch, follow the steps below: -1. **Create an index pattern**: - - a. Go to the OpenSearch Dashboards left side menu and select **Stack Management**. - b. Select on **Index Patterns** and select **Create index pattern**. - c. Enter the name of the index that contains the audit logs and follow the prompts to complete the index pattern creation process. +1. **Create an index pattern**: In the **Stack Management** section, establish an index pattern to organize your log data. -2. **Create a visualization**: - - a. Select **Visualize** in the OpenSearch Dashboards left side menu, and then select **Create new visualization** or **Create visualization** if there are already saved visualizations. - b. Choose the type of visualization you want to create and select the index pattern you created in the previous step. - c. Choose the specific fields you want to display in your visualization. +2. **Create a visualization**: Navigate to the **Visualize** section, where you can design and tailor visualizations to suit your analysis needs. -3. **Save visualization**: - - a. Select **Save** in the top right corner of the dashboard. - b. In the **Save visualization** screen, enter a title and description for the visualization. - c. Click Save. - -4. **Modify visualization**: - - To make changes to a visualization, in the **Visualization** screen, select the pencil icon next to the visualization you want to modify, make the desired changes, and save them. +3. **Save and modify visualization**: Once you've created a visualization, save it for future reference. 
You can always return to modify and update it as your requirements evolve -Related reading +Related pages ---------------- * `OpenSearch audit logs documentation `_ \ No newline at end of file diff --git a/docs/products/opensearch/howto/connect-with-nodejs.rst b/docs/products/opensearch/howto/connect-with-nodejs.rst index 798f8c8bff..1e5b3c0df3 100644 --- a/docs/products/opensearch/howto/connect-with-nodejs.rst +++ b/docs/products/opensearch/howto/connect-with-nodejs.rst @@ -7,9 +7,11 @@ To connect to the cluster, you'll need ``service_uri``, which you can find eithe We strongly recommend using environment variables for credential information. A good way to do this is to use ``dotenv``. You will find installation and usage instructions `on its library's project page `_, but in short, you need to create ``.env`` file in the project and assign ``SERVICE_URI`` inside of this file. -Add the require line to the top of your file:: +Add the require line to the top of your file: - require("dotenv").config() +.. code:: + + require("dotenv").config() Now you can refer to the value of ``service_uri`` as ``process.env.SERVICE_URI`` in the code. diff --git a/docs/products/opensearch/howto/migrating_elasticsearch_data_to_aiven.rst b/docs/products/opensearch/howto/migrating_elasticsearch_data_to_aiven.rst index 50291fbfb4..68f948e520 100644 --- a/docs/products/opensearch/howto/migrating_elasticsearch_data_to_aiven.rst +++ b/docs/products/opensearch/howto/migrating_elasticsearch_data_to_aiven.rst @@ -13,11 +13,12 @@ Elasticsearch servers to the same cluster, online migration is not currently possible. .. important:: - Migrating from Elasticsearch to OpenSearch may affect the connectivity between client applications and your service. For example, some code included in clients or tools may check the service version, which might not work with OpenSearch. We recommend that you check the following OpenSearch resources for more information: + + Migrating from Elasticsearch to OpenSearch may affect the connectivity between client applications and your service. For example, some code included in clients or tools may check the service version, which might not work with OpenSearch. We recommend that you check the following OpenSearch resources for more information: - * `OpenSearch release notes `_ - * `OpenSearch Dashboards release notes `_ - * `Frequently asked questions about OpenSearch `_ + * `OpenSearch release notes `_ + * `OpenSearch Dashboards release notes `_ + * `Frequently asked questions about OpenSearch `_ To migrate or copy data: diff --git a/docs/products/opensearch/howto/opensearch-aggregations-and-nodejs.rst b/docs/products/opensearch/howto/opensearch-aggregations-and-nodejs.rst index c16127a2ec..b534ca7567 100644 --- a/docs/products/opensearch/howto/opensearch-aggregations-and-nodejs.rst +++ b/docs/products/opensearch/howto/opensearch-aggregations-and-nodejs.rst @@ -9,7 +9,7 @@ Learn how to aggregate data using OpenSearch and its NodeJS client. In this tuto Prepare the playground ********************** -You can create an OpenSearch cluster either with the visual interface or with the command line. Depending on your preference follow the instructions for :doc:`getting started with the console for Aiven for Opensearch ` or see :doc:`how to create a service with the help of Aiven command line interface `. +You can create an OpenSearch cluster either with the visual interface or with the command line. 
Depending on your preference follow the instructions for :doc:`getting started with the console for Aiven for Opensearch ` or see :doc:`how to create a service with the help of Aiven command line interface `. .. note:: diff --git a/docs/products/opensearch/howto/opensearch-and-nodejs.rst b/docs/products/opensearch/howto/opensearch-and-nodejs.rst index 83a046a2f6..c6eef5da89 100644 --- a/docs/products/opensearch/howto/opensearch-and-nodejs.rst +++ b/docs/products/opensearch/howto/opensearch-and-nodejs.rst @@ -1,12 +1,12 @@ Write search queries with OpenSearch® and NodeJS ================================================ -Learn how the OpenSearch® JavaScript client gives a clear and useful interface to communicate with an OpenSearch cluster and run search queries. To make it more delicious we'll be using a recipe dataset from Kaggle 🍕. +Learn how the OpenSearch® JavaScript client gives a clear and useful interface to communicate with an OpenSearch cluster and run search queries. To make it more delicious we'll be using a recipe dataset from Kaggle. Prepare the playground ********************** -You can create an OpenSearch cluster either with the visual interface or with the command line. Depending on your preference follow the instructions for :doc:`getting started with the console for Aiven for Opensearch ` or see :doc:`how to create a service with the help of Aiven command line interface `. +You can create an OpenSearch cluster either with the visual interface or with the command line. Depending on your preference follow the instructions for :doc:`getting started with the console for Aiven for Opensearch ` or see :doc:`how to create a service with the help of Aiven command line interface `. .. note:: @@ -395,21 +395,19 @@ In the next method we combine what we learned so far, using both term-level and Now it's your turn to experiment! Create your own boolean query, using what we learned to find recipes with particular nutritional values and ingredients. Experiment using different clauses to see how they affects the results. -What's next? -************ - -Now that you learned how to work with search queries, have a look at :doc:`our tutorial for aggregations `. Or, if you're done for a day, see :doc:`how you can pause the service `. - -Resources -********* - -We created an OpenSearch cluster, connected to it and tried out different types of search queries. But this is just a tip of the iceberg. Here are some resources to help you learn other features of OpenSearch and its JavaScript client - -* `Demo repository `_ - All the examples we run in this tutorial can be found in -* `OpenSearch JavaScript client `_ -* :doc:`How to use OpenSearch with curl ` -* `Official OpenSearch documentation `_ - * `Term-level queries `_ - * `Full-text queries `_ - * `Boolean queries `_ +Related pages +************* + +- :doc:`Aggregation tutorial `. +- :doc:`Pausing the service `. +- `Demo repository `_. 
+ All the examples we run in this tutorial can be found in: + + - `OpenSearch JavaScript client `_ + - :doc:`How to use OpenSearch with curl ` + - `Official OpenSearch documentation `_ + + - `Term-level queries `_ + - `Full-text queries `_ + - `Boolean queries `_ diff --git a/docs/products/opensearch/howto/opensearch-dashboard-multi_tenancy.rst b/docs/products/opensearch/howto/opensearch-dashboard-multi_tenancy.rst index f7f552a907..87bcee273b 100644 --- a/docs/products/opensearch/howto/opensearch-dashboard-multi_tenancy.rst +++ b/docs/products/opensearch/howto/opensearch-dashboard-multi_tenancy.rst @@ -1,4 +1,4 @@ -Set up OpenSearch® Dashboard multi-tenancy |beta| +Set up OpenSearch® Dashboard multi-tenancy ================================================== Aiven for OpenSearch® provides support for multi-tenancy through OpenSearch Security Dashboard. Multi-tenancy in OpenSearch Security enables multiple users or groups to securely access the same OpenSearch cluster while maintaining their distinct permissions and data access levels. With multi-tenancy, each tenant has its own isolated space for working with indexes, visualizations, dashboards, and other OpenSearch objects, ensuring tenant-specific data and resources are protected from unauthorized access. @@ -80,6 +80,6 @@ To manage tenants in the OpenSearch dashboard, you can follow these steps: * **Edit, delete, or duplicate tenants**: To manage existing tenants, select them from the list and use the **Actions** dropdown to edit, delete, or duplicate them according to your needs. -Related articles +Related pages ------------------ * `OpenSearch Dashboards multi-tenancy `_ \ No newline at end of file diff --git a/docs/products/opensearch/howto/opensearch-log-integration.rst b/docs/products/opensearch/howto/opensearch-log-integration.rst index 239b589e23..8c08138bc3 100644 --- a/docs/products/opensearch/howto/opensearch-log-integration.rst +++ b/docs/products/opensearch/howto/opensearch-log-integration.rst @@ -45,14 +45,10 @@ There are two parameters that you can adjust when integrating logs to your OpenS You can change the configuration of the ``index prefix`` and ``index retention limit`` after the integration is enabled. 1. Log in to the `Aiven Console `_, and select the Aiven for OpenSearch service. - -2. On the service's **Overview** screen, scroll to **Service integrations**, and select **Manage integrations**. - -4. From the list in the **Integrations** screen, find the service to configure. - -5. Select **Edit** from the drop-down menu (ellipsis), and make the necessary changes to the parameters. - -6. Select **Edit** to save your changes. +2. Click **Integrations** on the sidebar. +3. Identify the service you want to configure in the Integrations page. +4. Click **Actions** (**...**) menu, select **Edit** to modify the necessary parameters. +5. After making the changes, click **Edit** again to save them. Disable logs integration @@ -60,12 +56,8 @@ Disable logs integration If you no longer wish to send logs from your service to OpenSearch, follow these steps to disable the integration: -1. On the **Overview** page, scroll down to the **Service integrations**. - -2. Select **Manage integrations**. - -3. From the list in the **Integrations** screen, find the service to disable the integration. - -4. Select **Disconnect** from the drop-down menu (ellipsis). +1. In your Aiven for OpenSearch service, navigate to the **Integrations** screen using the left sidebar and locate the service you want to modify. +2. 
From the **Actions** (**...**) menu, select **Disconnect** to proceed with disabling the integration. +3. In the confirmation window, click **Disconnect** again to confirm and save the changes. Your log integration for OpenSearch will be disabled. diff --git a/docs/products/opensearch/howto/opensearch-search-and-python.rst b/docs/products/opensearch/howto/opensearch-search-and-python.rst index e8736cd30a..f127bf8b1d 100644 --- a/docs/products/opensearch/howto/opensearch-search-and-python.rst +++ b/docs/products/opensearch/howto/opensearch-search-and-python.rst @@ -522,8 +522,8 @@ As you can see, this search returns results 🍍: It is your turn, try out more combinations to better understand the fuzzy query. -Read more -''''''''' +Related pages +''''''''''''' Want to try out OpenSearch with other clients? You can learn how to write search queries with NodeJS client, see :doc:`our tutorial `. We created an OpenSearch cluster, connected to it, and tried out different types of search queries. Now, you can explore more resources to help you to learn other features of OpenSearch and its Python client. diff --git a/docs/products/opensearch/howto/opensearch-with-curl.rst b/docs/products/opensearch/howto/opensearch-with-curl.rst index 9a930f223c..491e109f99 100644 --- a/docs/products/opensearch/howto/opensearch-with-curl.rst +++ b/docs/products/opensearch/howto/opensearch-with-curl.rst @@ -110,9 +110,11 @@ OpenSearch is designed to make stored information easy to search and access (the Search all items '''''''''''''''' -OpenSearch has support for excellent search querying, but if you want to get everything:: +OpenSearch has support for excellent search querying, but if you want to get everything: - curl OPENSEARCH_URI/_search +.. code:: + + curl OPENSEARCH_URI/_search Search results include some key fields to look at when you try this example: diff --git a/docs/products/opensearch/howto/saml-sso-authentication.rst b/docs/products/opensearch/howto/saml-sso-authentication.rst index 4a30bbce66..884d5ed170 100644 --- a/docs/products/opensearch/howto/saml-sso-authentication.rst +++ b/docs/products/opensearch/howto/saml-sso-authentication.rst @@ -42,7 +42,7 @@ To enable SAML authentication for your Aiven for OpenSearch service, follow thes .. note:: - The SP Entity ID can be any arbitrary value defined by the user. Additionally, OpenSearch suggests creating a new application for OpenSearch Dashboards and using the URL of your OpenSearch Dashboards as the SP entity ID. + The SP Entity ID can be any arbitrary value defined by the user. Additionally, OpenSearch suggests creating a new application for OpenSearch Dashboards and using the URL of your OpenSearch Dashboards as the SP entity ID. * **SAML roles key**: This is an optional field that allows you to map SAML roles to Aiven for OpenSearch roles. * **SAML subject key**: This is also an optional field that allows you to map SAML subject to Aiven for OpenSearch users. diff --git a/docs/products/opensearch/howto/setup-cross-cluster-replication-opensearch.rst b/docs/products/opensearch/howto/setup-cross-cluster-replication-opensearch.rst index d23a866b32..fefac10471 100644 --- a/docs/products/opensearch/howto/setup-cross-cluster-replication-opensearch.rst +++ b/docs/products/opensearch/howto/setup-cross-cluster-replication-opensearch.rst @@ -1,4 +1,4 @@ -Setup cross cluster replication for Aiven for OpenSearch® |beta| +Setup cross cluster replication for Aiven for OpenSearch® ================================================================ .. 
important:: @@ -6,19 +6,19 @@ Setup cross cluster replication for Aiven for OpenSearch® |beta| Aiven for OpenSearch® cross-cluster replication (CCR) is a :doc:`limited availability feature `. If you're interested in trying out this feature, contact the sales team at `sales@Aiven.io `_. .. note:: - - Cross cluster replication feature for Aiven for OpenSearch is a beta release. - - Cross cluster replication is not available for Hobbyist and Startup plans. + + - Cross cluster replication is not available for the Hobbyist and Startup plans. Follow these steps to set up :doc:`cross cluster replication ` for your Aiven for OpenSearch service: 1. Log in to the `Aiven Console `_, and select the Aiven for OpenSearch service for which you want to set up cross cluster replication. 2. In the service's **Overview** screen, scroll to the **Cross cluster replications** section and select **Create follower**. -3. In the **Create OpenSearch follower cluster** page, +3. In the **Create OpenSearch follower cluster** page: - * Enter a name for the follower cluster - * Select the desired cloud provider - * Select the desired cloud region - * Select the service plan + * Enter a name for the follower cluster. + * Select the desired cloud provider. + * Select the desired cloud region. + * Select the service plan. .. note:: During creation, the follower cluster service must have the same service plan as the leader cluster service. This ensures the follower cluster service has as much memory as the leader cluster. You can change the service plan as required later. diff --git a/docs/products/postgresql.rst b/docs/products/postgresql.rst index 31e38f5c23..d606b8a273 100644 --- a/docs/products/postgresql.rst +++ b/docs/products/postgresql.rst @@ -7,7 +7,7 @@ Aiven for PostgreSQL® is is a fully-managed and hosted relational database serv .. grid:: 1 2 2 2 - .. grid-item-card:: :doc:`Quickstart ` + .. grid-item-card:: :doc:`Quickstart ` :shadow: md :margin: 2 2 0 0 diff --git a/docs/products/postgresql/concepts/aiven-db-migrate.rst b/docs/products/postgresql/concepts/aiven-db-migrate.rst index 76ea4d9292..2c51817e67 100644 --- a/docs/products/postgresql/concepts/aiven-db-migrate.rst +++ b/docs/products/postgresql/concepts/aiven-db-migrate.rst @@ -17,19 +17,19 @@ Migration requirements The following are the two basic requirements for a migration: -1. the source server is publicly available or there is a virtual private cloud (VPC) peering connection between the private networks -2. a user account with access to the destination cluster from an external IP, as configured in ``pg_hba.conf`` on the source cluster is present +#. The source server is publicly available or there is a virtual private cloud (VPC) peering connection between the private networks +#. A user account with access to the destination cluster from an external IP, as configured in ``pg_hba.conf`` on the source cluster is present. Additionally to perform a **logical replication**, the following need to be valid: -3. PostgreSQL® version 10 or newer -4. Credentials with superuser access to the source cluster or the ``aiven-extras`` extension installed (see also: `Aiven Extras on GitHub `_) +#. PostgreSQL® version 10 or newer +#. Credentials with superuser access to the source cluster or the ``aiven-extras`` extension installed (see also: `Aiven Extras on GitHub `_) -.. Note:: - The ``aiven_extras`` extension allows you to perform publish/subscribe-style logical replication without a superuser account, and it is preinstalled on Aiven for PostgreSQL servers. 
+ .. Note:: + The ``aiven_extras`` extension allows you to perform publish/subscribe-style logical replication without a superuser account, and it is preinstalled on Aiven for PostgreSQL servers. -* An available replication slot on the destination cluster for each database migrated from the source cluster. -* ``wal_level`` setting on the source cluster to ``logical``. + * An available replication slot on the destination cluster for each database migrated from the source cluster. + * ``wal_level`` setting on the source cluster to ``logical``. Migration pre-checks '''''''''''''''''''' diff --git a/docs/products/postgresql/concepts/dba-tasks-pg.rst b/docs/products/postgresql/concepts/dba-tasks-pg.rst index eeb3f21d19..28fdba0d4d 100644 --- a/docs/products/postgresql/concepts/dba-tasks-pg.rst +++ b/docs/products/postgresql/concepts/dba-tasks-pg.rst @@ -29,8 +29,10 @@ The ``aiven_extras`` extension, developed and maintained by Aiven, enables the ` * Manage `publications `_ * :doc:`Claim public schema ownership ` -You can install the ``aiven_extras`` extension executing the following command with the ``avnadmin`` user:: +You can install the ``aiven_extras`` extension executing the following command with the ``avnadmin`` user: - CREATE EXTENSION aiven_extras CASCADE; +.. code:: + + CREATE EXTENSION aiven_extras CASCADE; For more information about ``aiven_extras`` check the `GitHub repository `_ for the project. diff --git a/docs/products/postgresql/concepts/high-availability.rst b/docs/products/postgresql/concepts/high-availability.rst index 03194fd1e3..842677b179 100644 --- a/docs/products/postgresql/concepts/high-availability.rst +++ b/docs/products/postgresql/concepts/high-availability.rst @@ -25,7 +25,7 @@ Aiven for PostgreSQL® is available on a variety of plans, offering different le About primary and standby nodes ------------------------------- -Aiven's Business and Premium plans offer :ref:`primary ` and :ref:`standby ` nodes. Having a standby service is useful for multiple reasons: +Aiven's Business and Premium plans offer :doc:`primary nodes ` and :doc:`standby nodes ` nodes. A standby service is useful for multiple reasons: * Provides another physical copy of the data in case of hardware, software, or network failures * Typically reduces the data loss window in disaster scenarios @@ -49,21 +49,21 @@ When the failed node is a PostgreSQL **standby** node, the primary node keeps ru When the failed node is a PostgreSQL **primary** node, the combined information from the Aiven monitoring infrastructure and the standby node is used to make a failover decision. The standby node is then promoted as the new primary and immediately starts serving clients. A new replacement node is automatically scheduled and becomes the new standby node. -If all the **primary** and **standby nodes** fail at the same time, new nodes are automatically scheduled for creation to become the new primary and standby. The primary node is restored from the latest available backup, which could involve some degree of data loss. Any write operations made since the backup of the latest :ref:`WAL` file are lost. Typically, this time window is limited to either five minutes of time or one :ref:`WAL` file. +If all the **primary** and **standby nodes** fail at the same time, new nodes are automatically scheduled for creation to become the new primary and standby. The primary node is restored from the latest available backup, which could involve some degree of data loss. 
Any write operations made since the backup of the latest :doc:`WAL ` file are lost. Typically, this time window is limited to either five minutes of time or one :doc:`WAL ` file. .. Note:: The amount of time it takes to replace a failed node depends mainly on the selected cloud region and the amount of data to be restored. However, in the case of partial loss of the cluster, the surviving node keeps on serving clients even during the recreation of the other node. All of this is automatic and requires no administrator intervention. -**Premium** plans operate in a similar way as **Business** plans. The main difference comes when one of the **standby** or **primary** nodes fails. Premium plans have an additional, redundant standby node available, providing platform availability even in the event of losing two nodes. In cases where the primary node fails, Aiven monitoring tool, using :ref:`PGLookout`, determines which of the standby nodes is the furthest along in replication (has the least potential for data loss) and does a controlled failover to that node. +**Premium** plans operate in a similar way as **Business** plans. The main difference comes when one of the **standby** or **primary** nodes fails. Premium plans have an additional, redundant standby node available, providing platform availability even in the event of losing two nodes. In cases where the primary node fails, the Aiven monitoring tool, using :doc:`PGLookout `, determines which of the standby nodes is the furthest along in replication (has the least potential for data loss) and does a controlled failover to that node. .. Note:: - For backups and restoration, Aiven utilises the popular Open Source backup daemon :ref:`PGHoard `, which Aiven maintains. It makes real-time copies of :ref:`WAL` files to an object store in compressed and encrypted format. + For backups and restoration, Aiven utilises the popular Open Source backup daemon :doc:`PGHoard `, which Aiven maintains. It makes real-time copies of :doc:`WAL ` files to an object store in compressed and encrypted format. Single-node Hobbyist and Startup service plans ---------------------------------------------- Hobbyist and Startup plans provide a single node; when it's lost, Aiven immediately starts the automatic process of creating a new replacement node. The new node starts up, restores its state from the latest available backup, and resumes serving customers. -Since there is just a single node providing the service, the service is unavailable for the duration of the restoration. In addition, any write operations made since the backup of the latest :ref:`WAL` file are lost. Typically, this time window is limited to either five minutes of time or one :ref:`WAL` file. +Since there is just a single node providing the service, the service is unavailable for the duration of the restoration. In addition, any write operations made since the backup of the latest :doc:`WAL ` file are lost. Typically, this time window is limited to either five minutes of time or one :doc:`WAL ` file. More information about PostgreSQL upgrade and failover procedures is available at :doc:`the dedicated page `.
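For example, assuming you are connected to the service with ``psql``, a minimal sketch of how to tell a primary from a standby and estimate standby lag looks like this. Both ``pg_is_in_recovery()`` and ``pg_last_xact_replay_timestamp()`` are standard PostgreSQL functions; the query itself is only an illustration of the behaviour described above.

.. code::

   -- Returns TRUE on a standby (recovery in progress), FALSE on the primary
   SELECT pg_is_in_recovery();

   -- On a standby: time since the last replayed transaction, a rough
   -- approximation of replication lag while the primary is taking writes
   SELECT now() - pg_last_xact_replay_timestamp() AS replication_lag;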
diff --git a/docs/products/postgresql/concepts/pg-backups.rst b/docs/products/postgresql/concepts/pg-backups.rst index 40d59b682f..6f6111cc04 100644 --- a/docs/products/postgresql/concepts/pg-backups.rst +++ b/docs/products/postgresql/concepts/pg-backups.rst @@ -6,7 +6,7 @@ About backups in Aiven for PostgreSQL® Aiven for PostgreSQL® databases are automatically backed up, with **full backups** made daily, and **write-ahead logs (WAL)** copied at 5 minute intervals, or for every new file generated. All backups are encrypted using ``pghoard``, an open source tool developed and maintained by Aiven, that you can find `on GitHub `_. -The time of day when the daily backups are made is initially randomly selected, but can be customised by setting the ``backup_hour`` and ``backup_minute`` advanced parameters, see :doc:`/docs/products/postgresql/reference/list-of-advanced-params`. +The time of day when the daily backups are made is initially randomly selected, but can be customised by setting the ``backup_hour`` and ``backup_minute`` advanced parameters, see :doc:`/docs/products/postgresql/reference/advanced-params`. .. note:: diff --git a/docs/products/postgresql/concepts/pg-shared-buffers.rst b/docs/products/postgresql/concepts/pg-shared-buffers.rst index bafbf57875..171525b8c9 100644 --- a/docs/products/postgresql/concepts/pg-shared-buffers.rst +++ b/docs/products/postgresql/concepts/pg-shared-buffers.rst @@ -170,7 +170,7 @@ You may want to prewarm the ``shared_buffers`` in anticipation of a specific wor If the ``shared buffers`` size is less than pre-loaded data, only the tailing end of the data is cached as the earlier data encounters a forced ejection. -Read more ------------ +Related pages +------------- For more information on shared buffers, see `Resource Consumption `_ in the PostgreSQL documentation. diff --git a/docs/products/postgresql/concepts/pgvector.rst b/docs/products/postgresql/concepts/pgvector.rst index 483b773b35..ed1cd1a085 100644 --- a/docs/products/postgresql/concepts/pgvector.rst +++ b/docs/products/postgresql/concepts/pgvector.rst @@ -26,16 +26,11 @@ In most cases, vector similarity calculations use distance metrics, for example, How pgvector works ------------------ -Enabling pgvector - You enable the extension on your database. -Vectorizing data - You generate embeddings for your data, for example, for a products catalog using tools such as the `OpenAI API `_ client. -Storing embeddings - You store the embeddings in Aiven for PostgreSQL using the pgvector extension. -Querying embeddings - You use the embeddings for the vector similarity search on the products catalog. -Adding indices - By default, pgvector executes the *exact* nearest neighbor search, which gives the perfect recall. If you add an index to use the *approximate* nearest neighbor search, you can speed up your search, trading off some recall for performance. +- Enabling pgvector: You enable the extension on your database. +- Vectorizing data: You generate embeddings for your data, for example, for a products catalog using tools such as the `OpenAI API `_ client. +- Storing embeddings: You store the embeddings in Aiven for PostgreSQL using the pgvector extension. +- Querying embeddings: You use the embeddings for the vector similarity search on the products catalog. +- Adding indices: By default, pgvector executes the *exact* nearest neighbor search, which gives the perfect recall. 
If you add an index to use the *approximate* nearest neighbor search, you can speed up your search, trading off some recall for performance. Why use pgvector ---------------- @@ -55,15 +50,11 @@ There are multiple industry applications for similarity searches over vector emb .. topic:: Examples - * AI-powered tools can find similarities between products or transactions, which can be used to produce product recommendations or detect potential scams or frauds. - * Sentiment analysis: words represented with similar vector embeddings have similar sentiment scores. + * AI-powered tools can find similarities between products or transactions, which can be used to produce product recommendations or detect potential scams or frauds. + * Sentiment analysis: words represented with similar vector embeddings have similar sentiment scores. -What's next ------------ +Related pages +------------- -:doc:`Enable and use pgvector on Aiven for PostgreSQL® ` - -Related reading ---------------- - -`pgvector README on GitHub `_ +- :doc:`Enable and use pgvector on Aiven for PostgreSQL® ` +- `pgvector README on GitHub `_ diff --git a/docs/products/postgresql/concepts/timescaledb.rst b/docs/products/postgresql/concepts/timescaledb.rst index d77fb39efa..9fde47de00 100644 --- a/docs/products/postgresql/concepts/timescaledb.rst +++ b/docs/products/postgresql/concepts/timescaledb.rst @@ -13,9 +13,11 @@ The data in these examples consists of a measured value (temperature or position Enable TimescaleDB on Aiven for PostgreSQL ------------------------------------------ -TimescaleDB is available as an extension; you can enable it by running:: +TimescaleDB is available as an extension; you can enable it by running: - CREATE EXTENSION timescaledb CASCADE; +.. code:: + + CREATE EXTENSION timescaledb CASCADE; After enabling the extension, you can create TimescaleDB hypertables and make use of its features for working with time-series data. For further information, have a look at the `Getting Started `_ guide from Timescale. diff --git a/docs/products/postgresql/concepts/upgrade-failover.rst b/docs/products/postgresql/concepts/upgrade-failover.rst index cac7bd60d3..0dc5a13337 100644 --- a/docs/products/postgresql/concepts/upgrade-failover.rst +++ b/docs/products/postgresql/concepts/upgrade-failover.rst @@ -31,9 +31,15 @@ After the replica promotion, ``servicename-projectname.aivencloud.com`` would po Replica server disconnection """""""""""""""""""""""""""" -If the **replica** server disappears, Aiven's management platform uses a **300-second timeout** before marking the server as down and creating a new replica server. During this period, the DNS ``replica-servicename-projectname.aivencloud.com`` points to the disappeared server that might not serve queries anymore. The DNS record pointing to the primary server (``servicename-projectname.aivencloud.com``) remains unchanged. +If the **replica** server disappears, Aiven's management platform uses a **60-second timeout** before marking the server as down and creating a new replica server. -If the replica server does not come back online during these 300 seconds, ``replica-servicename-projectname.aivencloud.com`` is pointed to the primary server until a new replica server is fully functional. +.. note:: + Each Aiven for PostgreSQL® Business plan supports one replica server only, which is why the service's read replica endpoint ``replica-SERVICE_NAME-PROJECT_NAME.aivencloud.com`` remains unavailable and queries to this endpoint time-out until a new replica is available. + +.. 
tip:: + For higher availability on a service's read replica endpoint, you can upgrade to a Premium plan with two standby servers used as read replicas. + +The DNS record pointing to primary server ``SERVICE_NAME-PROJECT_NAME.aivencloud.com`` remains unchanged during the recovery of the replica server. Controlled switchover during upgrades or migrations --------------------------------------------------- diff --git a/docs/products/postgresql/getting-started.rst b/docs/products/postgresql/get-started.rst similarity index 81% rename from docs/products/postgresql/getting-started.rst rename to docs/products/postgresql/get-started.rst index ced9d82f8f..1afcd113f4 100644 --- a/docs/products/postgresql/getting-started.rst +++ b/docs/products/postgresql/get-started.rst @@ -46,9 +46,9 @@ If you're checking out PostgreSQL, loading a test dataset will give you somethin \c dellstore -.. Tip:: + .. Tip:: - Your ``psql`` terminal prefix will change to ``dellstore==>`` when you are connected to the correct database. + Your ``psql`` terminal prefix will change to ``dellstore==>`` when you are connected to the correct database. 5. Populate the database by executing the following command from ``psql``: @@ -83,15 +83,16 @@ The output should look like this: public | reorder | table | avnadmin (12 rows) -Further reading ----------------- -Here are some more resources to help you on your PostgreSQL journey: +.. seealso:: -* Code examples for connecting to PostgreSQL from your application: - * :doc:`Go ` - * :doc:`Python ` -* How to :doc:`use PgAdmin ` with Aiven for PostgreSQL -* How to :doc:`migrate your PostgreSQL to Aiven ` -* Learn PostgreSQL with some `PostgreSQL Exercises `_ -* The `awesome PostgreSQL ecosystem `_ of tools and solutions + - Code examples for connecting to PostgreSQL from your application: + + - :doc:`Go ` + - :doc:`Python ` + + - How to :doc:`use PgAdmin ` with Aiven for PostgreSQL + - How to :doc:`migrate your PostgreSQL to Aiven ` + - Learn PostgreSQL with some `PostgreSQL Exercises `_ + - The `awesome PostgreSQL ecosystem `_ of tools and solutions + \ No newline at end of file diff --git a/docs/products/postgresql/howto.rst b/docs/products/postgresql/howto.rst index 4f66fe0051..2dc08f8a32 100644 --- a/docs/products/postgresql/howto.rst +++ b/docs/products/postgresql/howto.rst @@ -40,6 +40,7 @@ Aiven for PostgreSQL® how-tos - :doc:`Prevent PostgreSQL® full disk issues ` - :doc:`Enable and use pgvector on Aiven for PostgreSQL® ` - :doc:`Check size of a database, a table or an index ` + - :doc:`Restrict access to databases or tables in Aiven for PostgreSQL®". ` .. dropdown:: Migration @@ -64,7 +65,7 @@ Aiven for PostgreSQL® how-tos - :doc:`Monitor PostgreSQL metrics with Grafana® ` - :doc:`Monitor PostgreSQL metrics with pgwatch2 ` - :doc:`Visualize data with Grafana® ` - - :doc:`Report and analyze with Google Data Studio ` + - :doc:`Report and analyze with Google Looker Studio ` - :doc:`Integrate two PostgreSQL services ` .. 
dropdown:: Cluster management diff --git a/docs/products/postgresql/howto/analyze-with-google-data-studio.rst b/docs/products/postgresql/howto/analyze-with-google-data-studio.rst index e6e9f1870a..527b7cfd07 100644 --- a/docs/products/postgresql/howto/analyze-with-google-data-studio.rst +++ b/docs/products/postgresql/howto/analyze-with-google-data-studio.rst @@ -1,12 +1,12 @@ -Report and analyze with Google Data Studio -========================================== +Report and analyze with Google Looker Studio +============================================ -Google Data Studio allows you to create reports and visualisations of the data in your Aiven for PostgreSQL® database, and combine these with data from many other data sources. +Google Looker Studio (previously Data Studio) allows you to create reports and visualisations of the data in your Aiven for PostgreSQL® database, and combine these with data from many other data sources. Variables --------- -These are the values you will need to connect to Google Data Studio: +These are the values you will need to connect to Google Looker Studio: ================== =========================================================================== Variable Description @@ -20,14 +20,14 @@ Variable Description Pre-requisites -------------- -1. You will need a Google account, to access Google Data Studio. +1. You will need a Google account, to access Google Looker Studio. 2. On the Aiven Console service page for your PostgreSQL database, download the CA certificate. The default filename is ``ca.pem``. -Connect your Aiven for PostgreSQL data source to Google Data Studio -------------------------------------------------------------------- +Connect your Aiven for PostgreSQL data source to Google Looker Studio +--------------------------------------------------------------------- -#. Login to Google and open `Google Data Studio `__ . +#. Login to Google and open `Google Looker Studio `__ . #. Select **Create** and choose **Data source**. diff --git a/docs/products/postgresql/howto/check-avoid-transaction-id-wraparound.rst b/docs/products/postgresql/howto/check-avoid-transaction-id-wraparound.rst index ad3d8d9d3f..058d2564ba 100644 --- a/docs/products/postgresql/howto/check-avoid-transaction-id-wraparound.rst +++ b/docs/products/postgresql/howto/check-avoid-transaction-id-wraparound.rst @@ -13,9 +13,11 @@ You can manually trigger a cleanup by executing ``VACUUM FREEZE``, but the autov Check the ``autovacuum`` frequency ---------------------------------- -Aiven for PostgreSQL® sets that number to scale according to the database size, up to 1.5 billion transactions (which leaves 500 million transaction IDs available before a forced freeze), to avoid unnecessary churn for stable data in existing tables. To check your transaction freeze limits, run the following command in your PostgreSQL® instance:: +Aiven for PostgreSQL® sets that number to scale according to the database size, up to 1.5 billion transactions (which leaves 500 million transaction IDs available before a forced freeze), to avoid unnecessary churn for stable data in existing tables. To check your transaction freeze limits, run the following command in your PostgreSQL® instance: - show autovacuum_freeze_max_age +.. code:: + + show autovacuum_freeze_max_age This shows you the number of transactions that trigger autovacuum to start freezing old rows. 
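To see how close your data is to that freeze limit, you can compare ``autovacuum_freeze_max_age`` with the age of the frozen transaction IDs recorded in the system catalogs. A minimal sketch using standard catalog columns (the ``LIMIT`` of 10 is an arbitrary choice for this example):

.. code::

   -- Tables whose oldest unfrozen transaction ID is largest,
   -- i.e. closest to triggering a forced freeze
   SELECT relname, age(relfrozenxid) AS xid_age
   FROM pg_class
   WHERE relkind = 'r'
   ORDER BY xid_age DESC
   LIMIT 10;

   -- The same check at the database level
   SELECT datname, age(datfrozenxid) AS xid_age
   FROM pg_database
   ORDER BY xid_age DESC;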
diff --git a/docs/products/postgresql/howto/claim-public-schema-ownership.rst b/docs/products/postgresql/howto/claim-public-schema-ownership.rst index 7abc570493..ccd9f87436 100644 --- a/docs/products/postgresql/howto/claim-public-schema-ownership.rst +++ b/docs/products/postgresql/howto/claim-public-schema-ownership.rst @@ -3,12 +3,16 @@ Claim public schema ownership When an Aiven for PostgreSQL® instance is created, the ``public`` schema is owned by the ``postgres`` user that is available only to Aiven for management purposes. If changes to the ``public`` schema are required, you can claim the ownership using the ``aiven_extras`` extension as the ``avnadmin`` database user. -1. Enable the ``aiven_extras`` extension:: +1. Enable the ``aiven_extras`` extension: + + .. code:: CREATE EXTENSION aiven_extras CASCADE; -2. Claim the public schema ownership with the dedicated ``claim_public_schema_ownership`` function:: +2. Claim the public schema ownership with the dedicated ``claim_public_schema_ownership`` function: + + .. code:: - SELECT * FROM aiven_extras.claim_public_schema_ownership(); + SELECT * FROM aiven_extras.claim_public_schema_ownership(); Now the ``avnadmin`` user owns the public schema and can modify it. diff --git a/docs/products/postgresql/howto/connect-go.rst b/docs/products/postgresql/howto/connect-go.rst index c5c7d481d3..956b24a613 100644 --- a/docs/products/postgresql/howto/connect-go.rst +++ b/docs/products/postgresql/howto/connect-go.rst @@ -19,9 +19,11 @@ Pre-requisites For this example you will need: -* The Go ``pq`` library:: +* The Go ``pq`` library: - go get github.com/lib/pq + .. code:: + + go get github.com/lib/pq * :doc:`/docs/platform/howto/download-ca-cert` from the service overview page, this example assumes it is in a local file called ``ca.pem``. @@ -39,10 +41,14 @@ This code creates a PostgreSQL client and opens a connection to the database. Th .. note:: This example replaces the query string parameter to specify ``sslmode=verify-ca`` to make sure that the SSL certificate is verified, and adds the location of the cert. -To run the code:: +To run the code: - go run main.go +.. code:: -If the script runs successfully, the outputs should be the PostgreSQL version running in your service like:: + go run main.go - Version: PostgreSQL 13.3 on x86_64-pc-linux-gnu, compiled by gcc, a 68c5366192 p 6520304dc1, 64-bit +If the script runs successfully, the outputs should be the PostgreSQL version running in your service like: + +.. code:: + + Version: PostgreSQL 13.3 on x86_64-pc-linux-gnu, compiled by gcc, a 68c5366192 p 6520304dc1, 64-bit diff --git a/docs/products/postgresql/howto/connect-java.rst b/docs/products/postgresql/howto/connect-java.rst index 4b825fca1f..e9ef8cc944 100644 --- a/docs/products/postgresql/howto/connect-java.rst +++ b/docs/products/postgresql/howto/connect-java.rst @@ -25,9 +25,11 @@ For this example you will need: Download PostgreSQL Driver. There are several options to do that -1. In case you have maven version >= 2+ run the code:: - - mvn org.apache.maven.plugins:maven-dependency-plugin:2.8:get -Dartifact=org.postgresql:postgresql:42.3.2:jar -Ddest=postgresql-42.3.2.jar +1. In case you have maven version >= 2+ run the code: + + .. code:: + + mvn org.apache.maven.plugins:maven-dependency-plugin:2.8:get -Dartifact=org.postgresql:postgresql:42.3.2:jar -Ddest=postgresql-42.3.2.jar 2. 
Manually the jar could be downloaded from https://jdbc.postgresql.org/download/ @@ -41,16 +43,21 @@ Add the following to ``PostgresqlExample.java`` and replace the placeholder with This code creates a PostgreSQL client and opens a connection to the database. Then runs a query checking the database version and prints the response -Before running the code replace - * **HOST** to ``HOSTNAME`` - * **PORT**: to ``PORT`` - * **DATABASE** to ``DATABASE`` - * **PASSWORD** to ``PASSWORD`` +Before running the code, change: + +* **HOST** to ``HOSTNAME`` +* **PORT**: to ``PORT`` +* **DATABASE** to ``DATABASE`` +* **PASSWORD** to ``PASSWORD`` -To run the code:: +To run the code: + +.. code:: javac PostgresqlExample.java && java -cp postgresql-42.2.24.jar:. PostgresqlExample -host HOST -port PORT -database DATABASE -username avnadmin -password PASSWORD -If the script runs successfully, the outputs should be the PostgreSQL version running in your service like:: +If the script runs successfully, the outputs should be the PostgreSQL version running in your service like: + +.. code:: Version: PostgreSQL 13.4 on x86_64-pc-linux-gnu, compiled by gcc, a cdda7373b4 p 9751fce1e6, 64-bit diff --git a/docs/products/postgresql/howto/connect-node.rst b/docs/products/postgresql/howto/connect-node.rst index f6a7053721..9b8465e6e5 100644 --- a/docs/products/postgresql/howto/connect-node.rst +++ b/docs/products/postgresql/howto/connect-node.rst @@ -23,9 +23,11 @@ Pre-requisites For this example you will need: -* The npm ``pg`` package:: +* The npm ``pg`` package: - npm install pg --save + .. code:: + + npm install pg --save * :doc:`/docs/platform/howto/download-ca-cert` from the service overview page, this example assumes it is in a local file called ``ca.pem``. @@ -39,10 +41,14 @@ Add the following to ``index.js`` and replace the connection parameters with the This code creates a PostgreSQL client and opens a connection to the database. Then runs a query checking the database version and prints the response. -To run the code:: +To run the code: + +.. code:: + + node index.js - node index.js +If the script runs successfully, the outputs should be the PostgreSQL version running in your service like: -If the script runs successfully, the outputs should be the PostgreSQL version running in your service like:: +.. code:: PostgreSQL 13.3 on x86_64-pc-linux-gnu, compiled by gcc, a 68c5366192 p 6520304dc1, 64-bit diff --git a/docs/products/postgresql/howto/connect-php.rst b/docs/products/postgresql/howto/connect-php.rst index d21ef668b7..0ef400c477 100644 --- a/docs/products/postgresql/howto/connect-php.rst +++ b/docs/products/postgresql/howto/connect-php.rst @@ -37,10 +37,14 @@ This code creates a PostgreSQL client and opens a connection to the database. Th .. note:: This example replaces the query string parameter to specify ``sslmode=verify-ca`` to make sure that the SSL certificate is verified, and adds the location of the cert. -To run the code:: +To run the code: - php index.php +.. code:: + + php index.php -If the script runs successfully, the outputs should be the PostgreSQL version running in your service like:: +If the script runs successfully, the outputs should be the PostgreSQL version running in your service like: - PostgreSQL 13.3 on x86_64-pc-linux-gnu, compiled by gcc, a 68c5366192 p 6520304dc1, 64-bit +.. 
code:: + + PostgreSQL 13.3 on x86_64-pc-linux-gnu, compiled by gcc, a 68c5366192 p 6520304dc1, 64-bit diff --git a/docs/products/postgresql/howto/connect-psql.rst b/docs/products/postgresql/howto/connect-psql.rst index 9119854c2c..3070881755 100644 --- a/docs/products/postgresql/howto/connect-psql.rst +++ b/docs/products/postgresql/howto/connect-psql.rst @@ -22,24 +22,32 @@ For this example you'll need ``psql`` already installed on your computer Connect to PostgreSQL ''''''''''''''''''''' -From your terminal, execute the following code:: +From your terminal, execute the following code: - psql POSTGRESQL_URI +.. code:: + + psql POSTGRESQL_URI -The output should look like the following if the connection is successful:: +The output should look like the following if the connection is successful: +.. code:: + psql (13.2, server 13.3) SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, bits: 256, compression: off) Type "help" for help. defaultdb=> -To confirm that the connection is working, issue the following code checking the PostgreSQL version:: +To confirm that the connection is working, issue the following code checking the PostgreSQL version: + +.. code:: select version(); -The result will be similar to the following:: +The result will be similar to the following: +.. code:: + version -------------------------------------------------------------------------------------------- PostgreSQL 13.3 on x86_64-pc-linux-gnu, compiled by gcc, a 68c5366192 p 6520304dc1, 64-bit diff --git a/docs/products/postgresql/howto/connect-python.rst b/docs/products/postgresql/howto/connect-python.rst index 526b98a382..9b52b676ef 100644 --- a/docs/products/postgresql/howto/connect-python.rst +++ b/docs/products/postgresql/howto/connect-python.rst @@ -21,9 +21,11 @@ For this example you will need: * Python 3.6 or later -* The Python ``psycopg2`` library. You can install this with ``pip``:: +* The Python ``psycopg2`` library. You can install this with ``pip``: - pip install psycopg2 + .. code:: + + pip install psycopg2 Code @@ -40,10 +42,14 @@ This code creates a PostgreSQL client and connects to the database. Then runs a .. note:: By default, the connection string specifies ``sslmode=require`` which does not verify the CA certificate. A better approach for production would be to change it to ``sslmode=verify-ca`` and include the certificate. -To run the code:: +To run the code: - python main.py +.. code:: + + python main.py -If the script runs successfully, the outputs should be the PostgreSQL version running in your service like:: +If the script runs successfully, the outputs should be the PostgreSQL version running in your service like: - PostgreSQL 13.3 on x86_64-pc-linux-gnu, compiled by gcc, a 68c5366192 p 6520304dc1, 64-bit +.. code:: + + PostgreSQL 13.3 on x86_64-pc-linux-gnu, compiled by gcc, a 68c5366192 p 6520304dc1, 64-bit diff --git a/docs/products/postgresql/howto/create-database.rst b/docs/products/postgresql/howto/create-database.rst index 8bf56d6b5d..5823dd6282 100644 --- a/docs/products/postgresql/howto/create-database.rst +++ b/docs/products/postgresql/howto/create-database.rst @@ -17,4 +17,4 @@ To create a new PostgreSQL® database: .. Tip:: - You can also use the :ref:`Aiven client ` or the :doc:`PostgreSQL client` to create your database from the CLI. + You can also use the :ref:`Aiven client ` or the :doc:`PostgreSQL client ` to create your database from the CLI. 
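As a minimal sketch of the CLI route mentioned in the tip above, once connected with ``psql`` as the ``avnadmin`` user you can create and switch to a database directly; the name ``mydatabase`` is only an example, not taken from the page being changed.

.. code::

   -- Example database name; replace with your own
   CREATE DATABASE mydatabase;

   -- Switch the psql session to the new database
   \c mydatabase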
diff --git a/docs/products/postgresql/howto/create-manual-backups.rst b/docs/products/postgresql/howto/create-manual-backups.rst index e6806b8467..e4306eb5a0 100644 --- a/docs/products/postgresql/howto/create-manual-backups.rst +++ b/docs/products/postgresql/howto/create-manual-backups.rst @@ -19,12 +19,14 @@ Variable Description Create backups with ``pg_dump`` ''''''''''''''''''''''''''''''' -Perform a backup of your database using the standard PostgreSQL ``pg_dump`` command. Full detail on the parameters can be found in the `associated documentation `_, but a typical command would look something like this:: - - pg_dump 'POSTGRESQL_URI' \ - -f backup_folder \ - -j 2 \ - -F directory +Perform a backup of your database using the standard PostgreSQL ``pg_dump`` command. Full detail on the parameters can be found in the `associated documentation `_, but a typical command would look something like this: + +.. code:: + + pg_dump 'POSTGRESQL_URI' \ + -f backup_folder \ + -j 2 \ + -F directory This command creates a backup in ``directory`` format (ready for use with ``pg_restore``) using 2 concurrent jobs and storing the output to a folder called ``backup_folder``. diff --git a/docs/products/postgresql/howto/create-read-replica.rst b/docs/products/postgresql/howto/create-read-replica.rst index a2a073320f..4be43712d0 100644 --- a/docs/products/postgresql/howto/create-read-replica.rst +++ b/docs/products/postgresql/howto/create-read-replica.rst @@ -40,17 +40,21 @@ Use a replica To use a read only replica: 1. Log in to the Aiven web console and select your PostgreSQL service. -2. In the **Overview** page, copy the **Replica URI** an use it to connect via ``psql``:: - - psql POSTGRESQL_REPLICA_URI +2. In the **Overview** page, copy the **Replica URI** and use it to connect via ``psql``: + + .. code:: + + psql POSTGRESQL_REPLICA_URI Identify replica status ----------------------- -To check whether you are connected to a primary or replica node, run the following command within a ``psql`` terminal already connected to a database:: - - SELECT * FROM pg_is_in_recovery(); +To check whether you are connected to a primary or replica node, run the following command within a ``psql`` terminal already connected to a database: + +.. code:: + + SELECT * FROM pg_is_in_recovery(); The above command returns ``TRUE`` if you are connected to the replica, and ``FALSE`` if you are connected to the primary server. diff --git a/docs/products/postgresql/howto/enable-jit.rst b/docs/products/postgresql/howto/enable-jit.rst index b4bf5ad38c..65a497d4cf 100644 --- a/docs/products/postgresql/howto/enable-jit.rst +++ b/docs/products/postgresql/howto/enable-jit.rst @@ -10,27 +10,28 @@ Enable JIT on the global level You can enable JIT for the complete Aiven for PostgreSQL service both via `Aiven Console `_ and :doc:`Aiven CLI `. -To enable JIT in the `Aiven console `_, take the following steps: +To enable JIT in the `Aiven Console `_, take the following steps: -#. Log in to the `Aiven web console `_. +#. Log in to the `Aiven Console `_. #. From the **Services** page, select the Aiven for PostgreSQL service where you want to enable JIT. -#. In your service's **Overview** page, scroll down to the **Advanced configuration** settings, and select **Change**. -#. In the **Edit advanced configuration** window, select **Add configuration option**. -#. Select parameter ``pg.jit``, and switch the toggle to ``on``. -#. Select **Save advanced configuration**. +#. From the sidebar on your service's page, select **Service settings**. +#. 
On the **Service settings** page, navigate to the **Advanced configuration** section, and select **Configure**. +#. In the **Advanced configuration** window, select **Add configuration options**. +#. Select parameter ``pg.jit``, and switch the toggle to ``Enabled``. +#. Select **Save configuration**. To enable JIT via :doc:`Aiven CLI `, you can use the :ref:`service update command `: .. code:: - avn service update -c pg.jit=true PG_SERVICE_NAME + avn service update --project PROJECT_NAME -c pg.jit=true PG_SERVICE_NAME Enable JIT for a specific database ---------------------------------- You might not want to use JIT for most simple queries since it would increase the cost. JIT can also be enabled for a single database: -1. Connect to the database where you want to enable JIT. E.g. with ``psql`` and the service URI available in the Aiven for PostgreSQL service overview console page +1. Connect to the database where you want to enable JIT, for example, with ``psql`` and the service URI available in `Aiven Console `_ > your Aiven for PostgreSQL service > the **Overview** page. .. code:: diff --git a/docs/products/postgresql/howto/list-dba-tasks.rst b/docs/products/postgresql/howto/list-dba-tasks.rst index 3b9a37c47f..6b3a596488 100644 --- a/docs/products/postgresql/howto/list-dba-tasks.rst +++ b/docs/products/postgresql/howto/list-dba-tasks.rst @@ -73,4 +73,8 @@ Database administration tasks .. grid-item-card:: :doc:`Check size of a database, a table or an index ` :shadow: md - :margin: 2 2 0 0 \ No newline at end of file + :margin: 2 2 0 0 + + .. grid-item-card:: :doc:`Restrict access to databases or tables in Aiven for PostgreSQL®". ` + :shadow: md + :margin: 2 2 0 0 diff --git a/docs/products/postgresql/howto/list-integrations.rst b/docs/products/postgresql/howto/list-integrations.rst index 202d99e7ab..77b4fd31a0 100644 --- a/docs/products/postgresql/howto/list-integrations.rst +++ b/docs/products/postgresql/howto/list-integrations.rst @@ -19,7 +19,7 @@ Aiven for PostgreSQL® integrations :shadow: md :margin: 2 2 0 0 - .. grid-item-card:: :doc:`Report and analyze with Google Data Studio ` + .. grid-item-card:: :doc:`Report and analyze with Google Looker Studio ` :shadow: md :margin: 2 2 0 0 diff --git a/docs/products/postgresql/howto/logical-replication-gcp-cloudsql.rst b/docs/products/postgresql/howto/logical-replication-gcp-cloudsql.rst index a61c976f01..552057fcea 100644 --- a/docs/products/postgresql/howto/logical-replication-gcp-cloudsql.rst +++ b/docs/products/postgresql/howto/logical-replication-gcp-cloudsql.rst @@ -13,6 +13,8 @@ If you have not enabled logical replication on Google Cloud SQL PostgreSQL® alr .. image:: /images/products/postgresql/migrate-cloudsql-network.png :alt: Cloud SQL PostgreSQL network -3. Set replication role to PostgreSQL user (or the user will be used for migration) in Cloud SQL PostgreSQL:: - - ALTER ROLE postgres REPLICATION; +3. Set replication role to PostgreSQL user (or the user will be used for migration) in Cloud SQL PostgreSQL: + + .. code:: + + ALTER ROLE postgres REPLICATION; diff --git a/docs/products/postgresql/howto/manage-pool.rst b/docs/products/postgresql/howto/manage-pool.rst index 07227ac12f..96aa25b21a 100644 --- a/docs/products/postgresql/howto/manage-pool.rst +++ b/docs/products/postgresql/howto/manage-pool.rst @@ -50,10 +50,14 @@ Connection pools for replicas For all Business and Premium plans, whenever you define a connection pool, the same connection pool is created both for primary and standby servers. 
For standby servers, the connection pool URI is exactly the same as for the primary server, except that the host name has a ``replica-`` prefix. -For example, if the primary connection URI is as follows:: +For example, if the primary connection URI is as follows: + +.. code:: postgres://avnadmin:password@pg-prod-myproject.aivencloud.com:20986/mypool?params -The replica connection pool URI is as follows:: +The replica connection pool URI is as follows: +.. code:: + postgres://avnadmin:password@replica-pg-prod-myproject.aivencloud.com:20986/mypool?params diff --git a/docs/products/postgresql/howto/migrate-aiven-db-migrate.rst b/docs/products/postgresql/howto/migrate-aiven-db-migrate.rst index 65b2845310..fec57c342b 100644 --- a/docs/products/postgresql/howto/migrate-aiven-db-migrate.rst +++ b/docs/products/postgresql/howto/migrate-aiven-db-migrate.rst @@ -38,7 +38,7 @@ In order to use the **logical replication** method, you'll need the following: .. code:: - avn service create -t pg -p DEST_PG_PLAN DEST_PG_NAME + avn service create --project PROJECT_NAME -t pg -p DEST_PG_PLAN DEST_PG_NAME 2. Enable the ``aiven_extras`` extension in the Aiven for PostgreSQL® target database as written in the :ref:`dedicated document `. @@ -93,13 +93,13 @@ You can initiate a migration to an Aiven for PostgreSQL® service with the :doc: .. code:: bash - avn service update -c migration.host=SRC_HOSTNAME \ - -c migration.port=SRC_PORT \ - -c migration.ssl=true \ - -c migration.username=SRC_USERNAME \ - -c migration.password=SRC_PASSWORD \ - -c migration.dbname=DST_DBNAME \ - -c migration.ignore_dbs=DB_TO_SKIP \ + avn service update --project PROJECT_NAME -c migration.host=SRC_HOSTNAME \ + -c migration.port=SRC_PORT \ + -c migration.ssl=true \ + -c migration.username=SRC_USERNAME \ + -c migration.password=SRC_PASSWORD \ + -c migration.dbname=DST_DBNAME \ + -c migration.ignore_dbs=DB_TO_SKIP \ DEST_PG_NAME .. Note:: @@ -113,10 +113,9 @@ You can check the migration status using the :doc:`Aiven CLI ` .. code:: bash - avn --show-http service migration-status \ + avn --project PROJECT_NAME --show-http service migration-status \ DEST_PG_NAME - .. Note:: There may be delay for migration status to update the current progress, keep running this command to see the most up-to-date status. @@ -176,7 +175,7 @@ The migration process can be stopped with: .. code:: bash - avn service update --remove-option migration DEST_PG_NAME + avn service update --project PROJECT_NAME --remove-option migration DEST_PG_NAME The above command removes all logical replication-related objects from both source and destination cluster. diff --git a/docs/products/postgresql/howto/migrate-cloud-region.rst b/docs/products/postgresql/howto/migrate-cloud-region.rst index 2e19bd27e3..f748c17167 100644 --- a/docs/products/postgresql/howto/migrate-cloud-region.rst +++ b/docs/products/postgresql/howto/migrate-cloud-region.rst @@ -6,8 +6,10 @@ Any Aiven service can be relocated to a different cloud vendor or region. This i To migrate a PostgreSQL service to a new cloud provider/region 1. Log in to `Aiven Console `_, and select the PostgreSQL instance you want to move. -2. In the **Overview** page, select **Migrate cloud**. -3. Select the new cloud provider and region where you want to deploy the PostgreSQL instance, then click **Create** +2. From the sidebar on your service's page, select **Service settings**. +3. On the **Service settings** page, navigate to the **Cloud and network** section, and select **Change cloud or region**. +4. 
In the **Migrate service to another cloud** window, select the new cloud provider and region where you want to deploy the PostgreSQL instance. +5. Select **Migrate**. The PostgreSQL cluster will enter the ``REBALANCING`` state, still serving queries from the old provider/region. diff --git a/docs/products/postgresql/howto/migrate-db-to-aiven-via-console.rst b/docs/products/postgresql/howto/migrate-db-to-aiven-via-console.rst index dc80ec178c..f28da50846 100644 --- a/docs/products/postgresql/howto/migrate-db-to-aiven-via-console.rst +++ b/docs/products/postgresql/howto/migrate-db-to-aiven-via-console.rst @@ -208,13 +208,14 @@ Migrate a database 1. Log in to the `Aiven Console `_. 2. On the **Services** page, select the service where your target database is located. -3. On the **Overview** page of the selected service, scroll down to the **Migrate database** section and select **Set up migration**. -4. Guided by the migration wizard, go through all the migration steps. +3. From the sidebar on your service's page, select **Service settings**. +4. On the **Service settings** page, navigate to the **Service management** section, and select **Import database**. +5. Guided by the migration wizard, go through all the migration steps. Step 1: Configure ''''''''''''''''' -Get familiar with the guidelines provided in the **PostgreSQL migration configuration guide** window and select **Get started**. +Get familiar with the guidelines provided in the **PostgreSQL migration configuration guide** window, make sure your configuration is in line with them, and select **Get started**. Step 2: Validation '''''''''''''''''' @@ -252,7 +253,7 @@ Trigger the migration by selecting **Start migration** in the **Database migrati While the migration is in progress, you can take the following actions: -* Let it proceed until completed by selecting **Close window**, which closes the wizard. You can come back to check the status at any time on the **Overview** page of the service in the **Migrate database** section. +* Let it proceed until completed by selecting **Close window**, which closes the wizard. You can come back to check the status at any time on the **Service settings** page > the **Service management** section > **Import database**. * Write to the target database. * Discontinue the migration by selecting **Stop migration**. Although the data already migrated is retained, you cannot restart the stopped process. To continue with the migration, you need to start a new migration process from scratch. @@ -277,14 +278,14 @@ As soon as the wizard communicates the completion of the migration, check if the This information in the wizard means that your data has been transferred to Aiven, but some new data is still continuously being synced between the connected databases. -* If there is no replication in progress, select **Close connection** in the migration wizard to finalize the migration process. As a result, on the **Overview** page of the service > the **Migrate database** section, you'll see the **Ready** tag. -* If the replication mode is active, you can select **Keep replicating**. As a result, on the **Overview** page of the service > the **Migrate database** section, you'll see the **Syncing** tag, and you'll be able to check the status of the migration process by selecting **Status update**. +* If there is no replication in progress, select **Close connection** in the migration wizard to finalize the migration process. 
As a result, on the **Service settings** page > the **Service management** section > **Import database**, you'll see the **Ready** tag. +* If the replication mode is active, you can select **Keep replicating**. As a result, on the **Service settings** page > the **Service management** section > **Import database**, you'll see the **Syncing** tag, and you'll be able to check the status of the migration process by selecting **Status update**. .. topic:: Result You have successfully migrated your PostgreSQL database into you Aiven for PostgreSQL service. -Related reading +Related pages --------------- - :doc:`About aiven-db-migrate ` diff --git a/docs/products/postgresql/howto/migrate-pg-dump-restore.rst b/docs/products/postgresql/howto/migrate-pg-dump-restore.rst index 24cb7002ee..fe7069b3f5 100644 --- a/docs/products/postgresql/howto/migrate-pg-dump-restore.rst +++ b/docs/products/postgresql/howto/migrate-pg-dump-restore.rst @@ -36,7 +36,7 @@ Perform the migration .. code:: - avn service create -t pg -p DEST_PG_PLAN DEST_PG_NAME + avn service create --project PROJECT_NAME -t pg -p DEST_PG_PLAN DEST_PG_NAME .. Tip:: diff --git a/docs/products/postgresql/howto/monitor-database-with-datadog.rst b/docs/products/postgresql/howto/monitor-database-with-datadog.rst index eac51a2a97..37b4f98e35 100644 --- a/docs/products/postgresql/howto/monitor-database-with-datadog.rst +++ b/docs/products/postgresql/howto/monitor-database-with-datadog.rst @@ -35,7 +35,7 @@ Using the ``avn service integration-list`` :ref:`Aiven CLI command + avn service integration-update --project --user-config '{"datadog_dbm_enabled": true}' * Check if user-config ``datadog_dbm_enabled`` set correctly: diff --git a/docs/products/postgresql/howto/monitor-with-pgwatch2.rst b/docs/products/postgresql/howto/monitor-with-pgwatch2.rst index 5c2874d834..f661012cf3 100644 --- a/docs/products/postgresql/howto/monitor-with-pgwatch2.rst +++ b/docs/products/postgresql/howto/monitor-with-pgwatch2.rst @@ -13,34 +13,43 @@ Prepare an Aiven for PostgreSQL instance for pgwatch2 The following steps need to be executed on the Aiven for PostgreSQL instance to be monitored with pgwatch2: -1. Create a user for pgwatch2 (with a sensible password):: +1. Create a user for pgwatch2 (with a sensible password): - CREATE USER pgwatch2 WITH PASSWORD 'password'; + .. code:: + + CREATE USER pgwatch2 WITH PASSWORD 'password'; -2. Limit the number of connections from pgwatch2 (optional, but recommended in the `pgwatch2 documentation`_):: +2. Limit the number of connections from pgwatch2 (optional, but recommended in the `pgwatch2 documentation`_): ALTER ROLE pgwatch2 CONNECTION LIMIT 3; -3. Allow pgwatch2 to read database statistics:: +3. Allow pgwatch2 to read database statistics: GRANT pg_read_all_stats TO pgwatch2; -4. If you want to collect data gathered from the ``pg_stat_statements`` extension, it needs to be enabled:: +4. If you want to collect data gathered from the ``pg_stat_statements`` extension, it needs to be enabled: + + .. code:: - CREATE EXTENSION IF NOT EXISTS pg_stat_statements; + CREATE EXTENSION IF NOT EXISTS pg_stat_statements; -5. The `pgwatch2 documentation`_ recommends to enable timing of database I/O calls by setting the PostgreSQL configuration parameter ``track_io_timing`` (see :doc:`/docs/products/postgresql/reference/list-of-advanced-params`). - .. warning:: According to the `PostgreSQL documentation`_, setting ``track_io_timing = on`` can cause significant overhead. +5. 
The `pgwatch2 documentation`_ recommends to enable timing of database I/O calls by setting the PostgreSQL configuration parameter ``track_io_timing`` (see :doc:`/docs/products/postgresql/reference/advanced-params`). + + .. warning:: + + According to the `PostgreSQL documentation`_, setting ``track_io_timing = on`` can cause significant overhead. Running pgwatch2 ---------------- -pgwatch2 has multiple `installation options`_ to choose from. For the sake of simplicity, the following example uses `ad-hoc mode`_ with a Docker container:: +pgwatch2 has multiple `installation options`_ to choose from. For the sake of simplicity, the following example uses `ad-hoc mode`_ with a Docker container: + +.. code:: - docker run --rm -p 3000:3000 -p 8080:8080 \ - -e PW2_ADHOC_CONN_STR='postgres://pgwatch2:password@HOST:PORT/defaultdb?sslmode=require' \ - -e PW2_ADHOC_CONFIG='rds' --name pw2 cybertec/pgwatch2-postgres + docker run --rm -p 3000:3000 -p 8080:8080 \ + -e PW2_ADHOC_CONN_STR='postgres://pgwatch2:password@HOST:PORT/defaultdb?sslmode=require' \ + -e PW2_ADHOC_CONFIG='rds' --name pw2 cybertec/pgwatch2-postgres This runs pgwatch2 with the container image provided by CYBERTEC. ``PW2_ADHOC_CONN_STR`` is set to the connection string of the PostgreSQL instance to be monitored, copied from the `Aiven web console`_ replacing the username/password have been replaced by the ones specifically created for pgwatch2. Please consult the `pgwatch2 documentation`_ to decide on the best way to set up pgwatch2 in your environment. diff --git a/docs/products/postgresql/howto/pagila.rst b/docs/products/postgresql/howto/pagila.rst index 943d2c0abd..0636035fe9 100644 --- a/docs/products/postgresql/howto/pagila.rst +++ b/docs/products/postgresql/howto/pagila.rst @@ -16,37 +16,37 @@ Before exploring the Pagila database, follow the :doc:`create new service articl 1. Download the ``pagila-data.sql`` from our `GitHub repository `_. -.. Tip:: - You may use the following command on your terminal:: + .. Tip:: + You may use the following command on your terminal: - wget https://raw.githubusercontent.com/aiven/devportal/main/code/products/postgresql/pagila/pagila-data.sql + .. code:: + + wget https://raw.githubusercontent.com/aiven/devportal/main/code/products/postgresql/pagila/pagila-data.sql 2. Connect to the PostgreSQL instance using the following command. The ``SERVICE_URI`` value can be found in the Aiven Console dashboard. -.. code:: shell + .. code:: shell - psql 'SERVICE_URI' + psql 'SERVICE_URI' 3. Within the ``psql`` shell, create a database named ``pagila`` and connect to it with the command below: -.. code:: psql - - CREATE DATABASE pagila; - \c pagila; + .. code:: psql + + CREATE DATABASE pagila; + \c pagila; 4. Populate the database with the command below. This might take some time. -.. code:: psql + .. code:: psql - \i pagila-data.sql; + \i pagila-data.sql; 5. Once the command finishes, make sure to reconnect to the database to access the imported data: -.. code:: psql - - \c pagila; + .. code:: psql -**You are ready to go!** You can use the :ref:`Sample queries` section below to explore the database. Have fun! 
+ \c pagila; Entity-relationship model diagram --------------------------------- diff --git a/docs/products/postgresql/howto/pgbouncer-stats.rst b/docs/products/postgresql/howto/pgbouncer-stats.rst index b9276aa399..96b8629a9e 100644 --- a/docs/products/postgresql/howto/pgbouncer-stats.rst +++ b/docs/products/postgresql/howto/pgbouncer-stats.rst @@ -8,25 +8,33 @@ Get PgBouncer URL PgBouncer URL can be checked under **Pools** in `Aiven Console `_ > your Aiven for PostgreSQL® service's page > **Pools** view (available from the sidebar). Alternatively, it can be extracted via :doc:`Aiven Command Line interface`, using ``jq`` (https://stedolan.github.io/jq/) to parse the JSON response. -Execute the following command replacing the ``INSTANCE_NAME`` parameter with the name of your instance:: +Execute the following command replacing the ``INSTANCE_NAME`` parameter with the name of your instance: - avn service get INSTANCE_NAME --json | jq -r '.connection_info.pgbouncer' +.. code:: -The output will be similar to the below:: + avn service get INSTANCE_NAME --project PROJECT_NAME --json | jq -r '.connection_info.pgbouncer' +The output will be similar to the below: + +.. code:: + postgres://avnadmin:xxxxxxxxxxx@demo-pg-dev-advocates.aivencloud.com:13040/pgbouncer?sslmode=require Connect to PgBouncer -------------------- -Connect to PgBouncer using the URL extracted above and show the statistics with:: +Connect to PgBouncer using the URL extracted above and show the statistics with: + +.. code:: + + pgbouncer=# SHOW STATS; - pgbouncer=# SHOW STATS; +Depending on the load of your database, the output will be similar to: -Depending on the load of your database, the output will be similar to:: +.. code:: database | total_xact_count | total_query_count | total_received | total_sent | total_xact_time | total_query_time | total_wait_time | avg_xact_count | avg_query_count | avg_recv | avg_sent | avg_xact_time | avg_query_time | avg_wait_time - -----------+------------------+-------------------+----------------+------------+-----------------+------------------+-----------------+----------------+-----------------+----------+----------+---------------+----------------+--------------- + ----------+------------------+-------------------+----------------+------------+-----------------+------------------+-----------------+----------------+-----------------+----------+----------+---------------+----------------+--------------- pgbouncer | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 (1 row) diff --git a/docs/products/postgresql/howto/prevent-full-disk.rst b/docs/products/postgresql/howto/prevent-full-disk.rst index 40c684499f..17591bdfb9 100644 --- a/docs/products/postgresql/howto/prevent-full-disk.rst +++ b/docs/products/postgresql/howto/prevent-full-disk.rst @@ -5,9 +5,11 @@ If your Aiven for PostgreSQL® service runs out of disk space, the service will To prevent this situation, Aiven automatically detects when your service is running out of free space and stops further write operations by setting the ``default_transaction_read_only`` parameter to ``ON``. -With this setting in place, clients trying to execute write operations will start facing errors like:: +With this setting in place, clients trying to execute write operations will start facing errors like: - cannot execute CREATE TABLE in a read-only transaction. +.. code:: + + cannot execute CREATE TABLE in a read-only transaction. 
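A quick way to confirm that this safeguard (rather than a client-side setting) is what is blocking writes, and to see how much space the current database is using, is to query the parameter and the database size from ``psql``. This is a minimal sketch only; ``SERVICE_URI`` is a placeholder for your service's connection URI:

.. code:: shell

    # Check whether the safeguard has switched the service to read-only mode
    psql 'SERVICE_URI' -c 'SHOW default_transaction_read_only;'

    # See how much space the current database is taking up
    psql 'SERVICE_URI' -c 'SELECT pg_size_pretty(pg_database_size(current_database()));'

If ``default_transaction_read_only`` reports ``on``, the disk-space safeguard described above is active.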
To re-enable database writes, you need to increase the available space by either deleting data or upgrading to a larger plan. @@ -20,8 +22,9 @@ You can upgrade the Aiven for PostgreSQL service plan via the `Aiven console `_: 1. Log in to `Aiven Console `_, and select your Aiven for PostgreSQL service. -2. In the **Overview** page of your service, scroll down to the **Service plan** section, and select **Change plan**. -3. In the **Change service plan** window, select a new plan with a higher capacity, and select **Change**. +2. Select **Service settings** from the sidebar of your service's page. +3. Navigate to the **Service plan** section, and select **Change plan** from the **Actions** (**...**) menu. +4. In the **Change service plan** window, select a new plan with a higher capacity, and select **Change**. Once the new nodes with the increased disk capacity are up and running, the disk usage drops below the critical level and the system automatically sets the ``default_transaction_read_only`` parameter to ``OFF`` allowing write operations again. diff --git a/docs/products/postgresql/howto/readonly-user.rst b/docs/products/postgresql/howto/readonly-user.rst new file mode 100644 index 0000000000..ec94d4f479 --- /dev/null +++ b/docs/products/postgresql/howto/readonly-user.rst @@ -0,0 +1,28 @@ +Restrict access to databases or tables in Aiven for PostgreSQL® +=============================================================== + +You can restrict access to Aiven for PostgreSQL® databases and tables by setting up read-only permissions for specific user roles. + +Set read-only access in a schema +-------------------------------- + +1. Modify the default privileges for a role in a particular schema. + +.. code-block:: sql + + alter default privileges for role name_of_role in schema name_of_schema YOUR_GRANT_OR_REVOKE_PERMISSIONS + +2. Apply the new read-only access setting to your existing database objects that use the affected schema. + +.. code-block:: sql + + grant select on all tables in schema name_of_schema to NAME_OF_READ_ONLY_ROLE + +Set read-only access in a database +---------------------------------- + +You can set up read-only access for a specific role in a particular database. + +1. Create a new database to be used as a template: ``create database ro__template...``. +2. For the new template database, set the permissions and roles that you want as the defaults in the template. +3. When creating a new database, use ``create database NAME with template = 'ro__template'``. diff --git a/docs/products/postgresql/howto/restore-backup.rst b/docs/products/postgresql/howto/restore-backup.rst index ad71b07f7b..22b63ceb50 100644 --- a/docs/products/postgresql/howto/restore-backup.rst +++ b/docs/products/postgresql/howto/restore-backup.rst @@ -8,13 +8,15 @@ Aiven for PostgreSQL® databases are automatically backed up and can be restored To restore a PostgreSQL database, take the following steps: -1. Log in to the `Aiven web console `_. +1. Log in to the `Aiven Console `_. 2. Select your Aiven for PostgreSQL service from the **Services** page. -3. In the **Overview** page of your service, select **New database fork**. -4. Enter a service name and choose a project name, database version, cloud region and plan for the new instance. -5. Select the **Source service state** defining the backup point, the options are as follows: +3. Select **Service settings** from the sidebar of your service's page. +4.
Navigate to the **Service management** section, and select **Fork database** from the **Actions** (**...**) menu. +5. Enter a service name, and choose a project name, database version, cloud region and plan for the new instance. +6. Select the **Source service state** defining the backup point, the options are as follows: * **Latest transaction** * **Point in time** - the date selector allows to chose a precise point in time within the available backup retention period. +7. With all the fork settings configured, select **Create fork**. Once the new service is running, you can change your application's connection settings to point to it. diff --git a/docs/products/postgresql/howto/setup-logical-replication.rst b/docs/products/postgresql/howto/setup-logical-replication.rst index d738e5ded4..31fde23614 100644 --- a/docs/products/postgresql/howto/setup-logical-replication.rst +++ b/docs/products/postgresql/howto/setup-logical-replication.rst @@ -50,36 +50,47 @@ This example assumes a source database called ``origin_database`` on a self-mana 1. On the source cluster, connect to the ``origin_database`` with ``psql``. -2. Create the ``PUBLICATION`` entry, named ``pub_source_tables``, for the test tables:: - - CREATE PUBLICATION pub_source_tables - FOR TABLE test_table,test_table_2,test_table_3 - WITH (publish='insert,update,delete'); +2. Create the ``PUBLICATION`` entry, named ``pub_source_tables``, for the test tables: + + .. code:: + + CREATE PUBLICATION pub_source_tables + FOR TABLE test_table,test_table_2,test_table_3 + WITH (publish='insert,update,delete'); -.. Tip:: - In PostgreSQL 10 and above, ``PUBLICATION`` entries define the tables to be replicated, which are in turn ``SUBSCRIBED`` to by the receiving database. + .. Tip:: + + In PostgreSQL 10 and above, ``PUBLICATION`` entries define the tables to be replicated, which are in turn ``SUBSCRIBED`` to by the receiving database. - When creating a publication entry, the ``publish`` parameter defines the operations to transfer. In the above example, all the ``INSERT``, ``UPDATE`` or ``DELETE`` operations will be transferred. + When creating a publication entry, the ``publish`` parameter defines the operations to transfer. In the above example, all the ``INSERT``, ``UPDATE`` or ``DELETE`` operations will be transferred. -3. PostgreSQL's logical replication doesn't copy table definitions, that can be extracted from the ``origin_database`` with ``pg_dump`` and included in a ``origin-database-schema.sql`` file with:: +3. PostgreSQL's logical replication doesn't copy table definitions, that can be extracted from the ``origin_database`` with ``pg_dump`` and included in a ``origin-database-schema.sql`` file with: - pg_dump --schema-only --no-publications \ - SRC_CONN_URI \ - -t test_table -t test_table_2 -t test_table_3 > origin-database-schema.sql + .. code:: + + pg_dump --schema-only --no-publications \ + SRC_CONN_URI \ + -t test_table -t test_table_2 -t test_table_3 > origin-database-schema.sql -4. Connect via ``psql`` to the destination Aiven for PostgreSQL database and create the new ``aiven_extras`` extension:: +4. Connect via ``psql`` to the destination Aiven for PostgreSQL database and create the new ``aiven_extras`` extension: - CREATE EXTENSION aiven_extras CASCADE; + .. code:: + + CREATE EXTENSION aiven_extras CASCADE; -5. Create the table definitions in the Aiven for PostgreSQL destination database within ``psql``:: +5. 
Create the table definitions in the Aiven for PostgreSQL destination database within ``psql``: - \i origin-database-schema.sql + .. code:: + + \i origin-database-schema.sql -6. Create a ``SUBSCRIPTION`` entry, named ``dest_subscription``, in the Aiven for PostgreSQL destination database to start replicating changes from the source ``pub_source_tables`` publication:: +6. Create a ``SUBSCRIPTION`` entry, named ``dest_subscription``, in the Aiven for PostgreSQL destination database to start replicating changes from the source ``pub_source_tables`` publication: - SELECT * FROM - aiven_extras.pg_create_subscription( + .. code:: + + SELECT * FROM + aiven_extras.pg_create_subscription( 'dest_subscription', 'host=SRC_HOST password=SRC_PASSWORD port=SRC_PORT dbname=SRC_DATABASE user=SRC_USER', 'pub_source_tables', @@ -88,24 +99,28 @@ This example assumes a source database called ``origin_database`` on a self-mana TRUE); -7. Verify that the subscription has been created successfully. As the ``pg_subscription`` catalog is superuser-only, you can use the ``aiven_extras.pg_list_all_subscriptions()`` function from ``aiven_extras`` extension:: +7. Verify that the subscription has been created successfully. As the ``pg_subscription`` catalog is superuser-only, you can use the ``aiven_extras.pg_list_all_subscriptions()`` function from ``aiven_extras`` extension: - SELECT subdbid, subname, subowner, subenabled, subslotname - FROM aiven_extras.pg_list_all_subscriptions(); + .. code:: + + SELECT subdbid, subname, subowner, subenabled, subslotname + FROM aiven_extras.pg_list_all_subscriptions(); subdbid | subname | subowner | subenabled | subslotname ---------+-------------------+----------+------------+------------- 16401 | dest_subscription | 10 | t | dest_slot - (1 row) - -8. Verify the subscription status:: + (1 row) - SELECT * FROM pg_stat_subscription; +8. Verify the subscription status: - subid | subname | pid | relid | received_lsn | last_msg_send_time | last_msg_receipt_time | latest_end_lsn | latest_end_time - -------+-------------------+-----+-------+--------------+-------------------------------+-------------------------------+----------------+------------------------------- - 16444 | dest_subscription | 869 | | 0/C002360 | 2021-06-25 12:06:59.570865+00 | 2021-06-25 12:06:59.571295+00 | 0/C002360 | 2021-06-25 12:06:59.570865+00 - (1 row) + .. code:: + + SELECT * FROM pg_stat_subscription; + + subid | subname | pid | relid | received_lsn | last_msg_send_time | last_msg_receipt_time | latest_end_lsn | latest_end_time + -------+-------------------+-----+-------+--------------+-------------------------------+-------------------------------+----------------+------------------------------- + 16444 | dest_subscription | 869 | | 0/C002360 | 2021-06-25 12:06:59.570865+00 | 2021-06-25 12:06:59.571295+00 | 0/C002360 | 2021-06-25 12:06:59.570865+00 + (1 row) 9. Verify the data is correctly copied over the Aiven for PostgreSQL target tables @@ -115,18 +130,22 @@ Remove unused replication setup It is important to remove unused replication setups, since the underlying replication slots in PostgreSQL forces the server to keep all the data needed to replicate since the publication creation time. If the data stream has no readers, there will be an ever-growing amount of data on disk until it becomes full. 
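Before removing anything, it can be useful to see how much WAL each slot is retaining. The following is a sketch only: it assumes PostgreSQL 10 or later, is run against the primary node, and uses ``SERVICE_URI`` as a placeholder connection URI:

.. code:: shell

    # For each replication slot, show whether it is active and how much WAL it retains
    psql 'SERVICE_URI' -c "SELECT slot_name, active, pg_size_pretty(pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn)) AS retained_wal FROM pg_replication_slots;"

An inactive slot with a steadily growing ``retained_wal`` value is the usual sign of a replication setup that should be removed.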
-To remove an unused subscription, essentially stopping the replication, run the following command in the Aiven for PostgreSQL target database:: +To remove an unused subscription, essentially stopping the replication, run the following command in the Aiven for PostgreSQL target database: - SELECT * FROM aiven_extras.pg_drop_subscription('dest_subscription'); +.. code:: + + SELECT * FROM aiven_extras.pg_drop_subscription('dest_subscription'); -Verify the replication removal with:: +Verify the replication removal with: - SELECT * FROM aiven_extras.pg_list_all_subscriptions(); +.. code:: + + SELECT * FROM aiven_extras.pg_list_all_subscriptions(); - subdbid | subname | subowner | subenabled | subconninfo | subslotname | subsynccommit | subpublications - ---------+---------+----------+------------+-------------+-------------+---------------+----------------- - (0 rows) + subdbid | subname | subowner | subenabled | subconninfo | subslotname | subsynccommit | subpublications + ---------+---------+----------+------------+-------------+-------------+---------------+----------------- + (0 rows) Manage inactive or lagging replication slots @@ -134,40 +153,48 @@ Manage inactive or lagging replication slots Inactive or lagging replication could cause problems in a database, like an ever-increasing disk usage not associated to any growth of the amount of data in the database. Filling the disk causes the database instance to stop serving clients and thus a loss of service. -1. Assess the replication slots status via ``psql``:: +1. Assess the replication slots status via ``psql``: - SELECT slot_name,restart_lsn FROM pg_replication_slots; + .. code:: + + SELECT slot_name,restart_lsn FROM pg_replication_slots; -The command output is like:: + The command output is like: - slot_name │ restart_lsn - ───────────────┼───────────── - pghoard_local │ 6E/16000000 - dest_slot | 5B/8B0 - (2 rows) + .. code:: -2. Compare the ``restart_lsn`` values between the replication slot in analysis (``dest_slot`` in the above example) and ``pghoard_local``: the hexadecimal difference between the them states how many write-ahead-logging (WAL) entries are waiting for the target ``dest_slot`` connector to catch up. + slot_name │ restart_lsn + ───────────────┼───────────── + pghoard_local │ 6E/16000000 + dest_slot | 5B/8B0 + (2 rows) -.. Note:: - In the above example the difference is 0x6E - 0x5B = 19 entries +2. Compare the ``restart_lsn`` values between the replication slot in analysis (``dest_slot`` in the above example) and ``pghoard_local``: the hexadecimal difference between the them states how many write-ahead-logging (WAL) entries are waiting for the target ``dest_slot`` connector to catch up. + .. Note:: + + In the above example the difference is 0x6E - 0x5B = 19 entries 3. If, after assessing the lag, the ``dest_slot`` connector results lagging or inactive: -* If the ``dest_slot`` connector is still in use, a recommended approach is to restart the process and verify if it solves the problem. You can disable and enable the associated subscription using ``aiven_extras``:: - - SELECT * FROM aiven_extras.pg_alter_subscription_disable('dest_subscription'); - SELECT * FROM aiven_extras.pg_alter_subscription_enable('dest_subscription'); - -* If the ``dest_slot`` connector is no longer needed, run the following command to remove it:: - - SELECT pg_drop_replication_slot('dest_slot'); + * If the ``dest_slot`` connector is still in use, a recommended approach is to restart the process and verify if it solves the problem. 
You can disable and enable the associated subscription using ``aiven_extras``: + + .. code:: + + SELECT * FROM aiven_extras.pg_alter_subscription_disable('dest_subscription'); + SELECT * FROM aiven_extras.pg_alter_subscription_enable('dest_subscription'); + + * If the ``dest_slot`` connector is no longer needed, run the following command to remove it: + + .. code:: + + SELECT pg_drop_replication_slot('dest_slot'); 4. In both cases, after the next PostgreSQL checkpoint, the disk space that the WAL logs have reserved for the ``dest_subscription`` connector should be freed up. -.. Note:: + .. Note:: - The checkpoint occurs only when + The checkpoint occurs only when * an hour has elapsed (we use a ``checkpoint_timeout`` value of 3600 seconds), or * 5% of disk write operations is reached (the ``max_wal_size`` value is set to 5% of the instance storage). diff --git a/docs/products/postgresql/howto/upgrade.rst b/docs/products/postgresql/howto/upgrade.rst index 5318bfbbe1..a13dfd21e5 100644 --- a/docs/products/postgresql/howto/upgrade.rst +++ b/docs/products/postgresql/howto/upgrade.rst @@ -25,16 +25,14 @@ without affecting the running service. This is useful in two main aspects: Here are the steps to upgrade a PostgreSQL service: 1. Log in to `Aiven Console `_, and select the instance that you want to upgrade. - -2. In the **Overview** page of your service, scroll down to the **PostgreSQL version** section, and select **Upgrade version**. - -3. In the **Upgrade Aiven for PostgreSQL Confirmation** window, select the version that you want to upgrade to from the dropdown menu. +2. Select **Service settings** from the sidebar of your service's page. +3. Navigate to the **Service management** section, and select **Upgrade version** from the **Actions** (**...**) menu. +4. In the **Upgrade Aiven for PostgreSQL Confirmation** window, select the version that you want to upgrade to from the dropdown menu. .. Note:: When you select the version, the system checks the compatibility of the upgrade. - -4. Select **Upgrade**. +5. Select **Upgrade**. The system starts applying the upgrade. diff --git a/docs/products/postgresql/howto/use-pgvector.rst b/docs/products/postgresql/howto/use-pgvector.rst index 0cdea7446c..cf0cfc9297 100644 --- a/docs/products/postgresql/howto/use-pgvector.rst +++ b/docs/products/postgresql/howto/use-pgvector.rst @@ -100,7 +100,7 @@ To stop the pgvector extension and remove it from a database, run the following DROP EXTENSION vector; -Related reading +Related pages --------------- * :doc:`pgvector for AI-powered search in Aiven for PostgreSQL® ` diff --git a/docs/products/postgresql/reference.rst b/docs/products/postgresql/reference.rst index 2f9951294e..c3ffb233e2 100644 --- a/docs/products/postgresql/reference.rst +++ b/docs/products/postgresql/reference.rst @@ -3,7 +3,7 @@ Aiven for PostgreSQL® reference .. grid:: 1 2 2 2 - .. grid-item-card:: :doc:`Advanced parameters ` + ..
grid-item-card:: :doc:`Advanced parameters ` :shadow: md :margin: 2 2 0 0 diff --git a/docs/products/postgresql/reference/list-of-advanced-params.rst b/docs/products/postgresql/reference/advanced-params.rst similarity index 100% rename from docs/products/postgresql/reference/list-of-advanced-params.rst rename to docs/products/postgresql/reference/advanced-params.rst diff --git a/docs/products/postgresql/reference/list-of-extensions.rst b/docs/products/postgresql/reference/list-of-extensions.rst index 98ff081de9..0c52048f6a 100644 --- a/docs/products/postgresql/reference/list-of-extensions.rst +++ b/docs/products/postgresql/reference/list-of-extensions.rst @@ -13,216 +13,102 @@ To check the details, including the version number of the extension, run ``selec .. |PG14only| replace:: :bdg-secondary:`PG14 only` .. note:: - Not all extensions listed in ``pg_available_extensions`` are able to be installed. See :ref:`Superuser-only Extensions` for more. + + Not all extensions listed in ``pg_available_extensions`` are able to be installed. See :ref:`Superuser-only Extensions` for more. Data types ---------- -``chkpass`` - https://www.postgresql.org/docs/10/chkpass.html - Data type for auto-encrypted passwords. |PG10only| - -``citext`` - https://www.postgresql.org/docs/current/citext.html - Data type for case-insensitive character strings. - -``cube`` - https://www.postgresql.org/docs/current/cube.html - Data type for multidimensional cubes. - -``hll`` - https://github.com/citusdata/postgresql-hll - Type for storing ``hyperloglog`` data. |PG11onwards| - -``hstore`` - https://www.postgresql.org/docs/current/hstore.html - Data type for storing sets of (key, value) pairs. - -``isn`` - https://www.postgresql.org/docs/current/isn.html - Data types for international product numbering standards. - -``ltree`` - https://www.postgresql.org/docs/current/ltree.html - Data type for hierarchical tree-like structures. - -``pgvector`` - https://github.com/pgvector/pgvector - Type for vector similarity search. |PG13onwards| - -``seg`` - https://www.postgresql.org/docs/current/seg.html - Data type for representing line segments or floating-point intervals. - -``timescaledb`` - https://github.com/timescale/timescaledb - Enables scalable inserts and complex queries for time-series data. - -``unit`` - https://github.com/df7cb/postgresql-unit - SI units extension. - -``uuid-ossp`` - https://www.postgresql.org/docs/current/uuid-ossp.html - Generate universally unique identifiers (UUIDs). +- `chkpass `__. Data type for auto-encrypted passwords. |PG10only| +- `citext `__. Data type for case-insensitive character strings. +- `cube `__. Data type for multidimensional cubes. +- `hll `__. Type for storing ``hyperloglog`` data. |PG11onwards| +- `hstore `__. Data type for storing sets of (key, value) pairs. +- `isn `__. Data types for international product numbering standards. +- `ltree `__. Data type for hierarchical tree-like structures. +- `pgvector `__. Type for vector similarity search. |PG13onwards| +- `seg `__. Data type for representing line segments or floating-point intervals. +- `timescaledb `__. Enables scalable inserts and complex queries for time-series data. +- `unit `__. SI units extension. +- `uuid-ossp `__. Generate universally unique identifiers (UUIDs). Search and text handling ------------------------ -``bloom`` - https://www.postgresql.org/docs/current/bloom.html - Bloom access method - signature file based index. 
- -``btree_gin`` - https://www.postgresql.org/docs/current/btree-gin.html - Support for indexing common data types in GIN. - -``btree_gist`` - https://www.postgresql.org/docs/current/btree-gist.html - Support for indexing common data types in GiST. - -``dict_int`` - https://www.postgresql.org/docs/current/dict-int.html - Text search dictionary template for integers. - -``fuzzystrmatch`` - https://www.postgresql.org/docs/current/fuzzystrmatch.html - Determine similarities and distance between strings. - -``pg_similarity`` - https://github.com/eulerto/pg_similarity - Support similarity queries. |PG13onwards| - -``pg_trgm`` - https://www.postgresql.org/docs/current/pgtrgm.html - Text similarity measurement and index searching based on trigrams. - -``pgcrypto`` - https://www.postgresql.org/docs/current/pgcrypto.html - Cryptographic functions. - -``rum`` - https://github.com/postgrespro/rum - RUM index access method. - -``unaccent`` - https://www.postgresql.org/docs/current/unaccent.html - Text search dictionary that removes accents. +- `bloom `__. Bloom access method - signature file based index. +- `btree_gin `__. Support for indexing common data types in GIN. +- `btree_gist `__. Support for indexing common data types in GiST. +- `dict_int `__. Text search dictionary template for integers. +- `fuzzystrmatch `__. Determine similarities and distance between strings. +- `pg_similarity `__. Support similarity queries. |PG13onwards| +- `pg_trgm `__. Text similarity measurement and index searching based on trigrams. +- `pgcrypto `__. Cryptographic functions. +- `rum `__. RUM index access method. +- `unaccent `__. Text search dictionary that removes accents. Auditing ------------------------ -``tcn`` - https://www.postgresql.org/docs/current/tcn.html - Triggered change notifications. +- `tcn `__. Triggered change notifications. Geographical features --------------------- -``address_standardizer`` - https://postgis.net/docs/standardize_address.html - Used to parse an address into constituent elements. Generally used to support geocoding address normalization step. - -``address_standardizer_data_us`` - https://postgis.net/docs/standardize_address.html - ``Address standardizer`` US dataset example. - -``earthdistance`` - https://www.postgresql.org/docs/current/earthdistance.html - Calculate great-circle distances on the surface of the Earth. - -``pgrouting`` - https://github.com/pgRouting/pgrouting - Extends the PostGIS/PostgreSQL geospatial database to provide geospatial routing and other network analysis functionality. - -``postgis`` - https://postgis.net/ - PostGIS geometry and geography spatial types and functions. - -``postgis_legacy`` - https://postgis.net/ - Legacy functions for PostGIS. - -``postgis_raster`` - https://postgis.net/docs/RT_reference.html - PostGIS raster types and functions. - -``postgis_sfcgal`` - http://postgis.net/docs/reference.html#reference_sfcgal - PostGIS SFCGAL functions. - -``postgis_tiger_geocoder`` - https://postgis.net/docs/Extras.html#Tiger_Geocoder - PostGIS tiger geocoder and reverse geocoder. - -``postgis_topology`` - https://postgis.net/docs/Topology.html - PostGIS topology spatial types and functions. +- `address_standardizer `__. Used to parse an address into constituent elements. Generally used to support geocoding address normalization step. +- `address_standardizer_data_us `__. ``Address standardizer`` US dataset example. +- `earthdistance `__. Calculate great-circle distances on the surface of the Earth. +- `pgrouting `__. 
Extends the PostGIS/PostgreSQL geospatial database to provide geospatial routing and other network analysis functionality. +- `postgis `__. PostGIS geometry and geography spatial types and functions. +- `postgis_legacy `__. Legacy functions for PostGIS. +- `postgis_raster `__. PostGIS raster types and functions. +- `postgis_sfcgal `__. PostGIS SFCGAL functions. +- `postgis_tiger_geocoder `__. PostGIS tiger geocoder and reverse geocoder. +- `postgis_topology `__. PostGIS topology spatial types and functions. Procedural language ------------------- -``plcoffee`` - https://github.com/plv8/plv8 - PL/CoffeeScript (v8) trusted procedural language. |PG10only| - -``plls`` - https://github.com/plv8/plv8 - PL/LiveScript (v8) trusted procedural language. |PG10only| - -``plperl`` - https://www.postgresql.org/docs/current/plperl.html - PL/Perl procedural language. - -``plpgsql`` - https://www.postgresql.org/docs/current/plpgsql.html - PL/pgSQL procedural language. - -``plv8`` - https://github.com/plv8/plv8 - PL/JavaScript (v8) trusted procedural language. |PG10only| +- `plcoffee `__. PL/CoffeeScript (v8) trusted procedural language. |PG10only| +- `plls `__. PL/LiveScript (v8) trusted procedural language. |PG10only| +- `plperl `__. PL/Perl procedural language. +- `plpgsql `__. PL/pgSQL procedural language. +- `plv8 `__. PL/JavaScript (v8) trusted procedural language. |PG10only| Connectivity ------------ -``dblink`` - https://www.postgresql.org/docs/current/contrib-dblink-function.html - Connect to other PostgreSQL databases from within a database. - -``postgres_fdw`` - https://www.postgresql.org/docs/current/postgres-fdw.html - Foreign-data wrapper for remote PostgreSQL servers. +- `dblink `__. Connect to other PostgreSQL databases from within a database. +- `postgres_fdw `__. Foreign-data wrapper for remote PostgreSQL servers. Utilities --------- -``aiven_extras`` - https://github.com/aiven/aiven-extras - This extension is meant for use in environments where you want non-superusers to be able to use certain database features. - -``bool_plperl`` - https://www.postgresql.org/docs/current/plperl-funcs.html - Transform between ``bool`` and ``plperl``. |PG13onwards| - -``intagg`` - https://www.postgresql.org/docs/current/intagg.html - Integer aggregator and enumerator (obsolete). - -``intarray`` - https://www.postgresql.org/docs/current/intarray.html - Functions, operators, and index support for 1-D arrays of integers. - -``jsonb_plperl`` - https://www.postgresql.org/docs/current/datatype-json.html - Transform between ``jsonb`` and ``plperl``. - -``lo`` - https://www.postgresql.org/docs/current/lo.html - Large Object maintenance. - -``pageinspect`` - https://www.postgresql.org/docs/current/pageinspect.html - Inspect the contents of database pages at a low level. - -``pg_buffercache`` - https://www.postgresql.org/docs/current/pgbuffercache.html - Examine the shared buffer cache. - -``pg_cron`` - https://github.com/citusdata/pg_cron - Job scheduler for PostgreSQL. - -``pg_partman`` - https://github.com/pgpartman/pg_partman - Extension to manage partitioned tables by time or ID. - -``pg_prewarm`` - https://www.postgresql.org/docs/current/pgprewarm.html - Prewarm relation data. |PG11onwards| - -``pg_prometheus`` - https://github.com/timescale/pg_prometheus - Prometheus metrics for PostgreSQL. |PG12earlier| - -``pg_repack`` - https://pgxn.org/dist/pg_repack/1.4.6/ - Reorganize tables in PostgreSQL databases with minimal locks. 
- -``pg_stat_statements`` - https://www.postgresql.org/docs/current/pgstatstatements.html - Track planning and execution statistics of all SQL statements executed. - -``pgrowlocks`` - https://www.postgresql.org/docs/current/pgrowlocks.html - Show row-level locking information. - -``pgstattuple`` - https://www.postgresql.org/docs/current/pgstattuple.html - Show tuple-level statistics. - -``sslinfo`` - https://www.postgresql.org/docs/current/sslinfo.html - Information about SSL certificates. - -``tablefunc`` - https://www.postgresql.org/docs/current/tablefunc.html - Functions that manipulate whole tables, including ``crosstab``. - -``timetravel`` - https://www.postgresql.org/docs/6.3/c0503.htm - Functions for implementing time travel. |PG11earlier| - -``tsm_system_rows`` - https://www.postgresql.org/docs/current/tsm-system-rows.html - TABLESAMPLE method which accepts number of rows as a limit. - -``tsm_system_time`` - https://www.postgresql.org/docs/current/tsm-system-time.html - TABLESAMPLE method which accepts time in milliseconds as a limit. +- `aiven_extras `__. This extension is meant for use in environments where you want non-superusers to be able to use certain database features. +- `bool_plperl `__. Transform between ``bool`` and ``plperl``. |PG13onwards| +- `intagg `__. Integer aggregator and enumerator (obsolete). +- `intarray `__. Functions, operators, and index support for 1-D arrays of integers. +- `jsonb_plperl `__. Transform between ``jsonb`` and ``plperl``. +- `lo `__. Large Object maintenance. +- `pageinspect `__. Inspect the contents of database pages at a low level. +- `pg_buffercache `__. Examine the shared buffer cache. +- `pg_cron `__. Job scheduler for PostgreSQL. +- `pg_partman `__. Extension to manage partitioned tables by time or ID. +- `pg_prewarm `__. Prewarm relation data. |PG11onwards| +- `pg_prometheus `__. Prometheus metrics for PostgreSQL. |PG12earlier| +- `pg_repack `__. Reorganize tables in PostgreSQL databases with minimal locks. +- `pg_stat_statements `__. Track planning and execution statistics of all SQL statements executed. +- `pgrowlocks `__. Show row-level locking information. +- `pgstattuple `__. Show tuple-level statistics. +- `sslinfo `__. Information about SSL certificates. +- `tablefunc `__. Functions that manipulate whole tables, including ``crosstab``. +- `timetravel `__. Functions for implementing time travel. |PG11earlier| +- `tsm_system_rows `__. TABLESAMPLE method which accepts number of rows as a limit. +- `tsm_system_time `__. TABLESAMPLE method which accepts time in milliseconds as a limit. .. _avn_superuser_only_extensions: @@ -231,53 +117,20 @@ Superuser-only extensions The following extensions can only be installed by superusers, **and are not generally available**. -``amcheck`` - https://www.postgresql.org/docs/current/amcheck.html - Functions for verifying relation integrity. - -``autoinc`` - https://www.postgresql.org/docs/current/contrib-spi.html - Functions for auto-incrementing fields. - -``bool_plperlu`` - https://www.postgresql.org/docs/current/plperl-funcs.html - Transform between ``bool`` and ``plperlu``. |PG13onwards| - -``dict_xsyn`` - https://www.postgresql.org/docs/current/dict-xsyn.html - Text search dictionary template for extended synonym processing. - -``file_fdw`` - https://www.postgresql.org/docs/current/file-fdw.html - Foreign-data wrapper for flat file access. - -``hstore_plperl`` - https://www.postgresql.org/docs/current/hstore.html - Transform between ``hstore`` and ``plperl``. 
- -``hstore_plperlu`` - https://www.postgresql.org/docs/current/hstore.html - Transform between ``hstore`` and ``plperlu``. - -``insert_username`` - https://www.postgresql.org/docs/current/contrib-spi.html - Functions for tracking who changed a table. - -``jsonb_plperlu`` - https://www.postgresql.org/docs/current/datatype-json.html - Transform between ``jsonb`` and ``plperlu``. - -``moddatetime`` - https://www.postgresql.org/docs/10/contrib-spi.html - Functions for tracking last modification time. - -``old_snapshot`` - https://www.postgresql.org/docs/current/oldsnapshot.html - Utilities in support of old_snapshot_threshold. |PG14only| - -``pageinspect`` - https://www.postgresql.org/docs/current/pageinspect.html - Inspect the contents of database pages at a low level. - -``pg_freespacemap`` - https://www.postgresql.org/docs/current/pgfreespacemap.html - Examine the free space map (FSM). - -``pg_surgery`` - https://www.postgresql.org/docs/current/pgsurgery.html - Extension to perform surgery on a damaged relation. |PG14only| - -``pg_visibility`` - https://www.postgresql.org/docs/current/pgvisibility.html - Examine the visibility map (VM) and page-level visibility info. - -``plperlu`` - https://www.postgresql.org/docs/current/plperl-trusted.html - PL/PerlU untrusted procedural language. - -``refint`` - https://www.postgresql.org/docs/current/contrib-spi.html - Functions for implementing referential integrity (obsolete). +- `amcheck `__. Functions for verifying relation integrity. +- `autoinc `__. Functions for auto-incrementing fields. +- `bool_plperlu `__. Transform between ``bool`` and ``plperlu``. |PG13onwards| +- `dict_xsyn `__. Text search dictionary template for extended synonym processing. +- `file_fdw `__. Foreign-data wrapper for flat file access. +- `hstore_plperl `__. Transform between ``hstore`` and ``plperl``. +- `hstore_plperlu `__. Transform between ``hstore`` and ``plperlu``. +- `insert_username `__. Functions for tracking who changed a table. +- `jsonb_plperlu `__. Transform between ``jsonb`` and ``plperlu``. +- `moddatetime `__. Functions for tracking last modification time. +- `old_snapshot `__. Utilities in support of old_snapshot_threshold. |PG14only| +- `pageinspect `__. Inspect the contents of database pages at a low level. +- `pg_freespacemap `__. Examine the free space map (FSM). +- `pg_surgery `__. Extension to perform surgery on a damaged relation. |PG14only| +- `pg_visibility `__. Examine the visibility map (VM) and page-level visibility info. +- `plperlu `__. PL/PerlU untrusted procedural language. +- `refint `__. Functions for implementing referential integrity (obsolete). diff --git a/docs/products/postgresql/reference/log-formats-supported.rst b/docs/products/postgresql/reference/log-formats-supported.rst index 7aaafd98c8..ca3475394e 100644 --- a/docs/products/postgresql/reference/log-formats-supported.rst +++ b/docs/products/postgresql/reference/log-formats-supported.rst @@ -5,7 +5,7 @@ Aiven for PostgreSQL® supports setting different log formats which are compatib You can customise this functionality by navigating to your PostgreSQL® service on the `Aiven Console `_. -Scroll down to the end of the **Overview** page under **Advanced configuration** and select the **Change** button. You can then select the ``pg.log_line_prefix`` parameter and select the needed format based on a pre-fixed list. +From the sidebar on your service's page, select **Service settings**. 
On the **Service settings** page, navigate to the **Advanced configuration** section, and select **Configure** > **Add configuration options**. Next, you can select the ``pg.log_line_prefix`` parameter and a desired format based on a pre-fixed list. The supported log formats are available below with an example of the output: @@ -30,7 +30,7 @@ The supported log formats are available below with an example of the output: [pg-user-test-1]2023-01-12T00:00:57.839867[postgresql-14][18-1] 2023-01-12 00:00:57.839 GMT [1323] [user=postgres,db=defaultdb,app=[unknown]] LOG: connection authorized: user=postgres database=defaultdb application_name=aiven-pruned [pg-user-test-1]2023-01-12T00:00:57.849223[postgresql-14][19-1] 2023-01-12 00:00:57.849 GMT [1323] [user=postgres,db=defaultdb,app=aiven-pruned] LOG: disconnection: session time: 0:00:00.010 user=postgres database=defaultdb host=[local] -After selecting one of the available log formats from the drop down menu, you can click the **Save advanced configuration** button to have the change take effect. Once the setting has been enabled, you can navigate to the logs tab on your service page to check if the log format has been successfully changed. +After selecting one of the available log formats from the drop down menu, select **Save configuration** to have the change take effect. Once the setting has been enabled, you can navigate to the logs tab on your service page to check if the log format has been successfully changed. At the moment, the formats available are known to be compatible with majority of the log analysis tools. diff --git a/docs/products/postgresql/reference/terminology.rst b/docs/products/postgresql/reference/terminology.rst index 91a4b796d4..94dcffd00f 100644 --- a/docs/products/postgresql/reference/terminology.rst +++ b/docs/products/postgresql/reference/terminology.rst @@ -1,22 +1,8 @@ Terminology for PostgreSQL® =========================== -.. _Terminology PGPrimary: - -Primary node: The PostgreSQL® primary node is the main server node that processes SQL queries, makes the necessary changes to the database files on the disk, and returns the results to the client application. - -.. _Terminology PGStandby: - -Standby node: PostgreSQL standby nodes (also called replicas) replicate the changes from the primary node and try to maintain an up-to-date copy of the same database files that exists on the primary node. - -.. _Terminology PGWAL: - -Write-Ahead Log (WAL): The WAL is a log file storing all the database transactions in a sequential manner. - -.. _Terminology PGLookout: - -pglookout: `pglookout `_ is a PostgreSQL replication monitoring and failover daemon. - -.. _Terminology PGHoard: - -PGHoard: `PGHoard `_ is a PostgreSQL backup daemon and restore tooling that stores backup data in cloud object stores +- **Primary node**: The PostgreSQL® primary node is the main server node that processes SQL queries, makes the necessary changes to the database files on the disk, and returns the results to the client application. +- **Standby node**: PostgreSQL standby nodes (also called replicas) replicate the changes from the primary node and try to maintain an up-to-date copy of the same database files that exists on the primary node. +- **Write-Ahead Log (WAL)**: The WAL is a log file storing all the database transactions in a sequential manner. +- **pglookout**: `pglookout `_ is a PostgreSQL replication monitoring and failover daemon. 
+- **PGHoard**: `PGHoard `_ is a PostgreSQL backup daemon and restore tooling that stores backup data in cloud object stores diff --git a/docs/products/postgresql/reference/use-of-deprecated-tls-versions.rst b/docs/products/postgresql/reference/use-of-deprecated-tls-versions.rst index 0b5533dac3..010cfa6621 100644 --- a/docs/products/postgresql/reference/use-of-deprecated-tls-versions.rst +++ b/docs/products/postgresql/reference/use-of-deprecated-tls-versions.rst @@ -29,7 +29,7 @@ To check the TLS versions clients are connecting with, you can query the ``pg_st .. code:: - datname │ pid │ usesysid │ usename │ application_name │ client_addr │ ssl │ version │ cipher │ backend_start + datname │ pid │ usesysid │ usename │ application_name │ client_addr │ ssl │ version │ cipher │ backend_start ──────────┼─────────┼──────────┼──────────┼──────────────────┼────────────────┼─────┼─────────┼────────────────────────┼─────────────────────────────── defaultdb │ 2172508 │ 16412 │ avnadmin │ psql │ 192.178.0.1 │ t │ TLSv1.3 │ TLS_AES_256_GCM_SHA384 │ 2022-09-12 12:39:12.644646+00 diff --git a/docs/products/postgresql/troubleshooting/troubleshooting-connection-pooling.rst b/docs/products/postgresql/troubleshooting/troubleshooting-connection-pooling.rst index df49ddfad5..53b56364bb 100644 --- a/docs/products/postgresql/troubleshooting/troubleshooting-connection-pooling.rst +++ b/docs/products/postgresql/troubleshooting/troubleshooting-connection-pooling.rst @@ -10,18 +10,15 @@ PgBouncer is a lightweight connection pooler for PostgreSQL® with low memory re PgBouncer offers several methods when rotating connections: -Session pooling - This is the most permissive method. When a client connects, it gets assigned with a server connection that is maintained as long as the client stays connected. When the client disconnects, the server connection is put back into the pool. This mode supports all PostgreSQL features. +- **Session pooling:** This is the most permissive method. When a client connects, it gets assigned with a server connection that is maintained as long as the client stays connected. When the client disconnects, the server connection is put back into the pool. This mode supports all PostgreSQL features. -Transaction pooling - A server connection is assigned to a client only during a transaction. When PgBouncer notices that the transaction is over, the server connection is put back into the pool. +- **Transaction pooling:** A server connection is assigned to a client only during a transaction. When PgBouncer notices that the transaction is over, the server connection is put back into the pool. .. warning:: - This mode breaks a few session-based features of PostgreSQL. Use it only when the application cooperates without using the features that break. For incompatible features, see `PostgreSQL feature map for pooling modes `_. + This mode breaks a few session-based features of PostgreSQL. Use it only when the application cooperates without using the features that break. For incompatible features, see `PostgreSQL feature map for pooling modes `_. -Statement pooling - This is the most restrictive method, which disallows multi-statement transactions. This is meant to enforce the ``autocommit`` mode on the client and is mostly targeted at PL/Proxy. +- **Statement pooling:** This is the most restrictive method, which disallows multi-statement transactions. This is meant to enforce the ``autocommit`` mode on the client and is mostly targeted at PL/Proxy. 
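To illustrate the transaction pooling caveat above, the sketch below prepares a statement and then executes it. In autocommit mode these are two separate transactions, so under transaction pooling the ``EXECUTE`` may be routed to a backend connection that never saw the ``PREPARE`` and fail; under session pooling it works. The connection URI values are placeholders:

.. code:: shell

    # Sketch: connect through the PgBouncer pool (placeholder credentials, host, port, and pool name)
    psql 'postgres://avnadmin:PASSWORD@HOST:POOL_PORT/POOL_NAME?sslmode=require' <<'SQL'
    PREPARE fetch_one AS SELECT 1;
    -- The PREPARE above and the EXECUTE below run as separate transactions in autocommit mode,
    -- so under transaction pooling the EXECUTE may land on a backend that never saw the PREPARE.
    EXECUTE fetch_one;
    SQL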
Handling connection pooling issues ---------------------------------- diff --git a/docs/products/redis/concepts/overview.rst b/docs/products/redis/concepts/overview.rst index b400cdc179..df8420b6f9 100644 --- a/docs/products/redis/concepts/overview.rst +++ b/docs/products/redis/concepts/overview.rst @@ -3,9 +3,6 @@ Aiven for Redis®* overview Aiven for Redis®* is a fully managed **in-memory NoSQL database**, deployable in the cloud of your choice which can help you store and access data quickly and efficiently. -Why Redis®? ------------ - Redis® is an open source, in-memory NoSQL database that serves as a fast data store, cache, or lightweight message broker. With Aiven, you can leverage the power of Redis to improve the performance of your applications by easily setting up high-performance data caching. Additionally, Redis can be integrated seamlessly into your observability stack for purposes such as logging and monitoring. @@ -31,13 +28,11 @@ Ways to use Aiven for Redis ----------------------------- - Redis can be used as a supplementary data store, complementing a primary database like PostgreSQL®. - - Redis is suitable for transient data, caching values for quick access, and data that can be reestablished, such as session data. While Redis is not a persistent storage solution by default, it can be configured to be persistent. -Redis resources ----------------- +Related pages +------------- * `Redis documentation `_ - * `Redis refcard on DZone `_ diff --git a/docs/products/redis/concepts/restricted-redis-commands.rst b/docs/products/redis/concepts/restricted-redis-commands.rst index 610605ca13..341b82c03d 100644 --- a/docs/products/redis/concepts/restricted-redis-commands.rst +++ b/docs/products/redis/concepts/restricted-redis-commands.rst @@ -31,10 +31,10 @@ Disabled eval commands The following script evaluation commands in the Aiven for Redis* service are disabled. If you require these commands to be enabled, contact Aiven support. - ``eval``: Executes a Lua script server-side. -- ``eval_ro``: Read-only variant of the `eval` command. +- ``eval_ro``: Read-only variant of the ``eval`` command. - ``evalsha``: Executes a script cached on the server side by its SHA1 digest. -- ``evalsha_ro``: Read-only variant of the `evalsha` command. +- ``evalsha_ro``: Read-only variant of the ``evalsha`` command. - ``fcall``: Calls a Redis function. -- ``fcall_ro``: Read-only variant of the `fcall` command. +- ``fcall_ro``: Read-only variant of the ``fcall`` command. - ``function``: Manages Redis functions. - ``script``: Manages the script cache. \ No newline at end of file diff --git a/docs/products/redis/howto/connect-go.rst b/docs/products/redis/howto/connect-go.rst index e731b1fb51..dfa757b774 100644 --- a/docs/products/redis/howto/connect-go.rst +++ b/docs/products/redis/howto/connect-go.rst @@ -17,9 +17,11 @@ Variable Description Pre-requisites '''''''''''''' -Get the ``go-redis/redis`` library:: +Get the ``go-redis/redis`` library: - go get github.com/go-redis/redis/v8 +.. 
code:: + + go get github.com/go-redis/redis/v8 Code '''' diff --git a/docs/products/redis/howto/connect-java.rst b/docs/products/redis/howto/connect-java.rst index 9501878186..83034b8620 100644 --- a/docs/products/redis/howto/connect-java.rst +++ b/docs/products/redis/howto/connect-java.rst @@ -17,7 +17,9 @@ Variable Description Pre-requisites '''''''''''''' -If there is ``maven`` installed then download of ``jredis`` and dependencies and putting it to ``lib`` folder could be done :: +If there is ``maven`` installed then download of ``jredis`` and dependencies and putting it to ``lib`` folder could be done: + +.. code:: mvn org.apache.maven.plugins:maven-dependency-plugin:2.8:get -Dartifact=redis.clients:jedis:4.1.1:jar -Ddest=lib/jedis-4.1.1.jar \ && mvn org.apache.maven.plugins:maven-dependency-plugin:2.8:get -Dartifact=org.apache.commons:commons-pool2:2.11.1:jar -Ddest=lib/commons-pool2-2.11.1.jar \ diff --git a/docs/products/redis/howto/connect-redis-cli.rst b/docs/products/redis/howto/connect-redis-cli.rst index 1ac0f470e0..b2291d444e 100644 --- a/docs/products/redis/howto/connect-redis-cli.rst +++ b/docs/products/redis/howto/connect-redis-cli.rst @@ -31,12 +31,13 @@ Execute the following from a terminal window: redis-cli -u REDIS_URI - This code connects to the Redis®* database. -To check the connection is working, execute the following code:: +To check the connection is working, execute the following code: - INFO +.. code:: + + INFO The command returns all the Redis parameters: @@ -50,14 +51,18 @@ The command returns all the Redis parameters: redis_mode:standalone ... -To set a key, execute the following command:: +To set a key, execute the following command: - SET mykey mykeyvalue123 +.. code:: + + SET mykey mykeyvalue123 The command should output a confirmation ``OK`` statement. -To retrieve the key value, execute the following command:: +To retrieve the key value, execute the following command: - GET mykey +.. code:: + + GET mykey The result is the value of the key, in the above example ``"mykeyvalue123"``. diff --git a/docs/products/redis/howto/manage-ssl-connectivity.rst b/docs/products/redis/howto/manage-ssl-connectivity.rst index d185093a49..aa15a39b17 100644 --- a/docs/products/redis/howto/manage-ssl-connectivity.rst +++ b/docs/products/redis/howto/manage-ssl-connectivity.rst @@ -6,19 +6,23 @@ Client support for SSL-encrypted connections Default support ~~~~~~~~~~~~~~~ -Aiven for Redis®* uses SSL encrypted connections by default. This is shown by the use of ``rediss://`` (with double s) prefix in the ``Service URI`` on the `Aiven Console `_. +Aiven for Redis®* uses SSL encrypted connections by default. This is shown by the use of ``rediss://`` (with double ``s``) prefix in the ``Service URI`` on the `Aiven Console `_. .. Tip:: You can find the ``Service URI`` on `Aiven console `_. -Since **Redis 6**, the ``redis-cli`` tool itself supports SSL connections; therefore, you can connect directly to your service using:: +Since **Redis 6**, the ``redis-cli`` tool itself supports SSL connections; therefore, you can connect directly to your service using: + +.. code:: redis-cli -u rediss://username:password@host:port -Alternatively, you can use the third-party `Redli tool `_:: +Alternatively, you can use the third-party `Redli tool `_: + +.. code:: - redli -u rediss://username:password@host:port + redli -u rediss://username:password@host:port Not every Redis client supports SSL-encrypted connections. 
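As a quick end-to-end check that a client can reach the service over TLS, you can send a single command with ``redis-cli``. This is a sketch only; the URI below is a placeholder for your service's ``rediss://`` URI:

.. code:: shell

    # Sketch: send a single PING over TLS to verify connectivity (placeholder URI)
    redis-cli -u 'rediss://USERNAME:PASSWORD@HOST:PORT' PING
    # A healthy service replies with PONG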
@@ -52,22 +56,13 @@ To understand the global options of the ``stunnel`` configuration, please check For ``service-level option``, the following parameters are configured: -``accept`` => *[host:]port* - **Accept connections on specified address** - - +- ``accept => *[host:]port*``: Accept connections on the specified address. +- ``connect => *[host:]port*``: Connect to a remote address. +- ``TIMEOUTclose => *seconds*``: Time to wait for close_notify. -``connect`` => *[host:]port* - **Connect to a remote address** +.. note:: It is important to adjust these values according to your service. On the **Overview** page you can find your **Overview** > **Host** and **Overview** > **Port** to configure the ``connect`` parameter. - - -``TIMEOUTclose`` => *seconds* - **Time to wait for close_notify** - -.. note:: It is important to make changes accordingly to your service. On the *Overview* page you can find your **Overview** > **Host** and **Overview** > **Port** to configure the ``connect`` parameter. -It is important to note that when SSL is in use, HAProxy will be responsible for terminating the SSL connections before they get forwarded to Redis. This process has a connection timeout set to 12 hours which is not configurable by the customer. If you allow very long Redis timeouts, this SSL-terminating HAProxy may end up closing the connection before the Redis timeout has expired. This timeout is independent of Redis timeout. +When SSL is in use, HAProxy is responsible for terminating the SSL connections before they get forwarded to Redis. This process has a connection timeout set to 12 hours which is not configurable by the customer. If you allow very long Redis timeouts, this SSL-terminating HAProxy may end up closing the connection before the Redis timeout has expired. This timeout is independent of Redis timeout. Allow plain-text connections ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -84,6 +79,4 @@ To disable SSL on an existing Redis instance use the following Aiven CLI command avn service update -c "redis_ssl=false" -After executing the command, the ``Service URI`` will change and point at the new location, it will also start with the ``redis://`` (removing the extra s) prefix denoting that it's a direct Redis connection which does not use SSL. - - +After executing the command, the ``Service URI`` points to the new location and starts with the ``redis://`` (removing the extra ``s``) prefix, meaning that it's a direct Redis connection which does not use SSL.
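One way to confirm that the change has taken effect is to read the service URI back with the Aiven CLI, following the same pattern used elsewhere in these docs. This sketch assumes ``jq`` is installed and uses placeholder service and project names:

.. code:: shell

    # Sketch: read the Service URI back and check its prefix (placeholder names, requires jq)
    avn service get SERVICE_NAME --project PROJECT_NAME --json | jq -r '.service_uri'

The returned URI should now begin with ``redis://`` instead of ``rediss://``.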
Step 1: Configure ````````````````````` @@ -100,7 +101,7 @@ When the wizard informs you about the completion of the migration, you can choos Your data has been successfully migrated to the designated Aiven for Redis database, and any subsequent additions to the connected databases are being continuously synchronized. -Related articles +Related pages ---------------- * :doc:`/docs/products/redis/howto/migrate-aiven-redis` diff --git a/docs/tools/aiven-console.rst b/docs/tools/aiven-console.rst index 6b8cfdba7b..a3b3c5659d 100644 --- a/docs/tools/aiven-console.rst +++ b/docs/tools/aiven-console.rst @@ -47,7 +47,7 @@ If you don't have an organization, click **Create organization** to :doc:`create Organization and organizational unit settings are available on the **Admin** page. Here you can: -* :doc:`Manage your groups` +* :doc:`Manage your groups ` * Create new projects under an organization or organizational unit * Configure :doc:`authentication methods for an organization ` * View logs of activity such as the adding or removing of users, changing authentication methods, and more diff --git a/docs/tools/aiven-console/howto/create-manage-teams.rst b/docs/tools/aiven-console/howto/create-manage-teams.rst index 2e72057bab..921ddc1392 100644 --- a/docs/tools/aiven-console/howto/create-manage-teams.rst +++ b/docs/tools/aiven-console/howto/create-manage-teams.rst @@ -7,7 +7,7 @@ Create and manage teams .. important:: **Teams are becoming groups** - :doc:`Groups ` are an easier way to control access to your organization's projects and services for a group of users. + Groups are an easier way to control access to your organization's projects and services for a group of users. See :ref:`migrate_teams_to_groups`. Create a new team diff --git a/docs/tools/api.rst b/docs/tools/api.rst index f0a25b6af9..45835b0a16 100644 --- a/docs/tools/api.rst +++ b/docs/tools/api.rst @@ -19,7 +19,6 @@ API quickstart * **Postman**: Try `Aiven on Postman `_ and start working with your data platform programmatically. * **API documentation**: Check the `API documentation and OpenAPI description `_ to work with the API directly. -* **Examples**: See the API in action with some :doc:`API examples `. Authentication -------------- @@ -42,6 +41,133 @@ information. This is perfect for machines but not ideal for humans. Try a tool like ``jq`` (https://stedolan.github.io/jq/) to make things easier to read and manipulate. +API examples +------------ + +In the following examples, replace ``{TOKEN}`` with your own value of the authentication token. + +List your projects +++++++++++++++++++ + +.. code:: + + curl -H "Authorization: aivenv1 {TOKEN}" https://api.aiven.io/v1/project + +The following is a sample response: + +.. 
code:: json + + { + "project_membership": { + "my-best-demo": "admin", + "aiven-sandbox": "admin" + }, + "project_memberships": { + "my-best-demo": [ + "admin" + ], + "aiven-sandbox": [ + "admin" + ] + }, + "projects": [ + { + "account_id": "a225dad8d3c4", + "account_name": "Aiven Accounts", + "address_lines": [], + "available_credits": "0.00", + "billing_address": "", + "billing_currency": "USD", + "billing_emails": [], + "billing_extra_text": null, + "billing_group_id": "588a8e63-fda7-4ff7-9bff-577debfee604", + "billing_group_name": "Billing", + "card_info": null, + "city": "", + "company": "", + "country": "", + "country_code": "", + "default_cloud": "google-europe-north1", + "end_of_life_extension": {}, + "estimated_balance": "4.11", + "estimated_balance_local": "4.11", + "payment_method": "no_payment_expected", + "project_name": "my-best-demo", + "state": "", + "tags": {}, + "tech_emails": [], + "tenant_id": "aiven", + "trial_expiration_time": null, + "vat_id": "", + "zip_code": "" + }, + { + "account_id": "a225dad8d3c4", + "account_name": "Aiven Accounts", + "address_lines": [], + "available_credits": "0.00", + "billing_address": "", + "billing_currency": "USD", + "billing_emails": [], + "billing_extra_text": null, + "billing_group_id": "588a8e63-fda7-4ff7-9bff-577debfee604", + "billing_group_name": "Billing", + "card_info": null, + "city": "", + "company": "", + "country": "", + "country_code": "", + "default_cloud": "google-europe-north1", + "end_of_life_extension": {}, + "estimated_balance": "4.11", + "estimated_balance_local": "4.11", + "payment_method": "no_payment_expected", + "project_name": "aiven-sandbox", + "state": "", + "tags": {}, + "tech_emails": [], + "tenant_id": "aiven", + "trial_expiration_time": null, + "vat_id": "", + "zip_code": "" + } + ] + } + + + +List of cloud regions +--------------------- + +This endpoint does not require authorization; if you aren't authenticated then the standard set of clouds will be returned. + +.. code:: + + curl https://api.aiven.io/v1/clouds + +The following is a sample response: + +.. code:: json + + { + "clouds": [ + { + "cloud_description": "Africa, South Africa - Amazon Web Services: Cape Town", + "cloud_name": "aws-af-south-1", + "geo_latitude": -33.92, + "geo_longitude": 18.42, + "geo_region": "africa" + }, + { + "cloud_description": "Africa, South Africa - Azure: South Africa North", + "cloud_name": "azure-south-africa-north", + "geo_latitude": -26.198, + "geo_longitude": 28.03, + "geo_region": "africa" + }, + +For most endpoints where a cloud is used as an input, the ``cloud_name`` from this result is the field to use. + Further reading --------------- diff --git a/docs/tools/api/examples.rst b/docs/tools/api/examples.rst deleted file mode 100644 index c7ef7e9b8e..0000000000 --- a/docs/tools/api/examples.rst +++ /dev/null @@ -1,133 +0,0 @@ -API examples -============ - -Here is an example to get you started with the Aiven API using curl. Replace ``{TOKEN}`` with your own value of the authentication token. - -List your projects ------------------- - -.. code:: - - curl -H "Authorization: aivenv1 {TOKEN}" https://api.aiven.io/v1/project - -The following is a sample response: - -.. 
code:: json - - { - "project_membership": { - "my-best-demo": "admin", - "aiven-sandbox": "admin" - }, - "project_memberships": { - "my-best-demo": [ - "admin" - ], - "aiven-sandbox": [ - "admin" - ] - }, - "projects": [ - { - "account_id": "a225dad8d3c4", - "account_name": "Aiven Accounts", - "address_lines": [], - "available_credits": "0.00", - "billing_address": "", - "billing_currency": "USD", - "billing_emails": [], - "billing_extra_text": null, - "billing_group_id": "588a8e63-fda7-4ff7-9bff-577debfee604", - "billing_group_name": "Billing", - "card_info": null, - "city": "", - "company": "", - "country": "", - "country_code": "", - "default_cloud": "google-europe-north1", - "end_of_life_extension": {}, - "estimated_balance": "4.11", - "estimated_balance_local": "4.11", - "payment_method": "no_payment_expected", - "project_name": "my-best-demo", - "state": "", - "tags": {}, - "tech_emails": [], - "tenant_id": "aiven", - "trial_expiration_time": null, - "vat_id": "", - "zip_code": "" - }, - { - "account_id": "a225dad8d3c4", - "account_name": "Aiven Accounts", - "address_lines": [], - "available_credits": "0.00", - "billing_address": "", - "billing_currency": "USD", - "billing_emails": [], - "billing_extra_text": null, - "billing_group_id": "588a8e63-fda7-4ff7-9bff-577debfee604", - "billing_group_name": "Billing", - "card_info": null, - "city": "", - "company": "", - "country": "", - "country_code": "", - "default_cloud": "google-europe-north1", - "end_of_life_extension": {}, - "estimated_balance": "4.11", - "estimated_balance_local": "4.11", - "payment_method": "no_payment_expected", - "project_name": "aiven-sandbox", - "state": "", - "tags": {}, - "tech_emails": [], - "tenant_id": "aiven", - "trial_expiration_time": null, - "vat_id": "", - "zip_code": "" - } - ] - } - - - -List of cloud regions ---------------------- - -This endpoint does not require authorization; if you aren't authenticated then the standard set of clouds will be returned. - -.. code:: - - curl https://api.aiven.io/v1/clouds - -The following is a sample response: - -.. code:: json - - { - "clouds": [ - { - "cloud_description": "Africa, South Africa - Amazon Web Services: Cape Town", - "cloud_name": "aws-af-south-1", - "geo_latitude": -33.92, - "geo_longitude": 18.42, - "geo_region": "africa" - }, - { - "cloud_description": "Africa, South Africa - Azure: South Africa North", - "cloud_name": "azure-south-africa-north", - "geo_latitude": -26.198, - "geo_longitude": 28.03, - "geo_region": "africa" - }, - -For most endpoints where a cloud is used as an input, the ``cloud_name`` from this result is the field to use. - -More endpoints --------------- - -For more information on the available endpoints, see the `Aiven API documentation `_. - - diff --git a/docs/tools/cli.rst b/docs/tools/cli.rst index 91a3683819..5fb2022f5c 100644 --- a/docs/tools/cli.rst +++ b/docs/tools/cli.rst @@ -66,14 +66,6 @@ A set of administrative commands to set up billing groups and manage which proje The billing group command also enables access to the credit code features, and detailed invoice line data. -``card`` -'''''''' - -Manage the payment cards on your account. - -:doc:`See detailed command information `. 
- - ``cloud`` ''''''''' diff --git a/docs/tools/cli/account.rst b/docs/tools/cli/account.rst index 17fd9af1cc..8a5a04b1c5 100644 --- a/docs/tools/cli/account.rst +++ b/docs/tools/cli/account.rst @@ -5,11 +5,6 @@ This article has the full list of commands for managing organization or organiza Check out the full description of :doc:`Aiven's security model ` for more information. -``avn account authentication-method`` -''''''''''''''''''''''''''''''''''''' - -A full list of commands is available in a :doc:`separate article for managing authentication methods in the CLI `. - ``avn account create`` ''''''''''''''''''''''' diff --git a/docs/tools/cli/account/account-authentication-method.rst b/docs/tools/cli/account/account-authentication-method.rst deleted file mode 100644 index 4995959497..0000000000 --- a/docs/tools/cli/account/account-authentication-method.rst +++ /dev/null @@ -1,126 +0,0 @@ -``avn account authentication-method`` -======================================================== - -Here you'll find the full list of commands for ``avn account authentication-method``. - - -Manage account authentication methods -------------------------------------- - -Commands for managing Aiven accounts authentication methods. - -``avn account authentication-method create`` -'''''''''''''''''''''''''''''''''''''''''''' - -Creates a new authentication method. More information about authentication methods creation is available at the `dedicated page `_ - -.. list-table:: - :header-rows: 1 - :align: left - - * - Parameter - - Information - * - ``account_id`` - - The id of the account - * - ``--name`` - - New authentication method name - * - ``--type`` - - The type of the new authentication method. Currently only ``saml`` is available - * - ``-c`` - - Additional configuration option (in the ``KEY=VALUE`` format) - * - ``-f`` - - Path to file containing additional configuration options (in the ``KEY=VALUE`` format) - -**Example:** Create a new ``saml`` authentication method named ``My Authentication Method`` for the account id ``123456789123``. - -.. code:: - - avn account authentication-method create 123456789123 \ - --name "My Authentication Method" \ - --type saml - -``avn account authentication-method delete`` -'''''''''''''''''''''''''''''''''''''''''''' - -Deletes an existing authentication method. - -.. list-table:: - :header-rows: 1 - :align: left - - * - Parameter - - Information - * - ``account_id`` - - The id of the account - * - ``authentication_id`` - - Id of the authentication method - -**Example:** Delete the authentication method with id ``88888888888`` belonging to the account id ``123456789123``. - -.. code:: - - avn account authentication-method delete 123456789123 88888888888 - -``avn account authentication-method list`` -'''''''''''''''''''''''''''''''''''''''''''' - -Lists the existing authentication methods. - -.. list-table:: - :header-rows: 1 - :align: left - - * - Parameter - - Information - * - ``account_id`` - - The id of the account - -**Example:** List all the authentication methods belonging to the account id ``123456789123``. - -.. code:: - - avn account authentication-method list 123456789123 - -An example of account authentication-method list output: - -.. 
code:: text - - ACCOUNT_ID AUTHENTICATION_METHOD_ENABLED AUTHENTICATION_METHOD_ID AUTHENTICATION_METHOD_NAME AUTHENTICATION_METHOD_TYPE STATE CREATE_TIME UPDATE_TIME - ============ ============================= ======================== ========================== ========================== ===================== ==================== ==================== - 123456789123 true am2exxxxxxxxx Okta saml active 2020-10-13T16:48:29Z 2021-08-10T08:33:15Z - 123456789123 true am2wwwwwwwwww Centrify saml active 2020-09-28T10:22:50Z 2020-09-28T12:06:06Z - 123456789123 true am2qqqqqqqqqq Azure saml active 2020-09-22T12:30:19Z 2020-09-22T12:34:02Z - 123456789123 true am2yyyyyyyyyy Platform authentication internal active 2020-09-09T20:28:44Z 2020-09-09T20:28:44Z - - -``avn account authentication-method update`` -'''''''''''''''''''''''''''''''''''''''''''' - -Updates an existing authentication method. - -.. list-table:: - :header-rows: 1 - :align: left - - * - Parameter - - Information - * - ``account_id`` - - The id of the account - * - ``authentication_id`` - - Id of the authentication method - * - ``--name`` - - New authentication method name - * - ``--enable`` - - Enables the authentication method - * - ``--disable`` - - Disables the authentication method - * - ``-c`` - - Additional configuration option (in the ``KEY=VALUE`` format) - * - ``-f`` - - Path to file containing additional configuration options (in the ``KEY=VALUE`` format) - -**Example:** Disable the authentication method with id ``am2exxxxxxxxx`` for the account id ``123456789123``. - -.. code:: - - avn account authentication-method update 123456789123 am2exxxxxxxxx --disable diff --git a/docs/tools/cli/card.rst b/docs/tools/cli/card.rst deleted file mode 100644 index 140a3956f5..0000000000 --- a/docs/tools/cli/card.rst +++ /dev/null @@ -1,99 +0,0 @@ -``avn card`` -=============================== - -This article has the full list of commands for managing credit card details using ``avn card``. - -``avn card add`` -'''''''''''''''' - -Adds a new credit card. - -.. list-table:: - :header-rows: 1 - :align: left - - * - Parameter - - Information - * - ``--cvc`` - - Credit card security code - * - ``--exp-month`` - - Card expiration month (1-12) - * - ``--exp-year`` - - Card expiration year - * - ``--name`` - - Name on the credit card - * - ``--number`` - - Credit card number - * - ``--update-project`` - - Assign card to a project - -**Example:** - -.. code:: shell - - avn card add --cvc 123 \ - --exp-month 01 \ - --exp-year 2031 \ - --name "Name Surname" \ - --number 4111111111111111 - -``avn card list`` -''''''''''''''''' - -Lists all credit cards: - -.. code:: shell - - avn card list - - -``avn card remove`` -''''''''''''''''''' - -Removes a credit card. - -.. list-table:: - :header-rows: 1 - :align: left - - * - Parameter - - Information - * - ``card-id`` - - The ID shown for this card in the ``list`` command output - - -**Example:** - -.. code:: shell - - avn card remove AAAAAAAA-BBBB-CCCC-DDDD-0123456789AB - -``avn card update`` -''''''''''''''''''' - -Updates a credit card. - -.. list-table:: - :header-rows: 1 - :align: left - - * - Parameter - - Information - * - ``card-id`` - - The ID shown for this card in the ``list`` command output - * - ``--exp-month`` - - Card expiration month (1-12) - * - ``--exp-year`` - - Card expiration year (YYYY) - * - ``--name`` - - Name on the credit card - - -**Example:** - -.. 
code:: shell - - avn card update AAAAAAAA-BBBB-CCCC-DDDD-0123456789AB \ - --exp-month 01 \ - --exp-year 2031 \ - --name "Name Surname" diff --git a/docs/tools/cli/mirrormaker.rst b/docs/tools/cli/mirrormaker.rst index 36b5fec8c1..215cc0af00 100644 --- a/docs/tools/cli/mirrormaker.rst +++ b/docs/tools/cli/mirrormaker.rst @@ -18,13 +18,15 @@ Creates a new Aiven for Apache Kafka® MirrorMaker 2 replication flow. Before creating a replication flow, an :ref:`integration ` needs to be created between the Aiven for Apache Kafka MirrorMaker 2 service and each of the source and the target services. - E.g. An integration with alias ``kafka-target-alias`` between an Aiven for Apache Kafka service named ``kafka-target`` and an Aiven for Apache Kafka MirrorMaker 2 named ``kafka-mm`` can be created with:: - - avn service integration-create \ - -s kafka-target \ - -d kafka-mm \ - -t kafka_mirrormaker \ - -c cluster_alias=kafka-target-alias + E.g. An integration with alias ``kafka-target-alias`` between an Aiven for Apache Kafka service named ``kafka-target`` and an Aiven for Apache Kafka MirrorMaker 2 named ``kafka-mm`` can be created with: + + .. code:: + + avn service integration-create \ + -s kafka-target \ + -d kafka-mm \ + -t kafka_mirrormaker \ + -c cluster_alias=kafka-target-alias At most **one** replication flow can be build between any two Aiven for Apache Kafka services. diff --git a/docs/tools/cli/service/flink.rst b/docs/tools/cli/service/flink.rst index 49578f366e..7b314a5c8c 100644 --- a/docs/tools/cli/service/flink.rst +++ b/docs/tools/cli/service/flink.rst @@ -228,12 +228,14 @@ Create an Aiven for Flink® application version in a specified project and servi Sinking data using the :doc:`Slack connector `, doesn't need an integration. - **Example**: to create an integration between an Aiven for Apache Flink service named ``flink-democli`` and an Aiven for Apache Kafka service named ``demo-kafka`` you can use the following command:: + **Example**: to create an integration between an Aiven for Apache Flink service named ``flink-democli`` and an Aiven for Apache Kafka service named ``demo-kafka`` you can use the following command: + + .. code:: - avn service integration-create \ - --integration-type flink \ - --dest-service flink-democli \ - --source-service demo-kafka + avn service integration-create \ + --integration-type flink \ + --dest-service flink-democli \ + --source-service demo-kafka All the available command integration options can be found in the :ref:`dedicated document ` diff --git a/docs/tools/cli/service/integration.rst b/docs/tools/cli/service/integration.rst index 6a7a517958..e587c3ec54 100644 --- a/docs/tools/cli/service/integration.rst +++ b/docs/tools/cli/service/integration.rst @@ -195,6 +195,8 @@ An example of ``avn service integration-endpoint-types-list`` output: Send service logs to remote syslog rsyslog alerta, alertmanager, cassandra, clickhouse, elasticsearch, flink, grafana, influxdb, kafka, kafka_connect, kafka_mirrormaker, m3aggregator, m3coordinator, m3db, mysql, opensearch, pg, redis, sw Send service metrics to SignalFX signalfx kafka +.. 
_avn-service-integration-endpoint-update: + ``avn service integration-endpoint-update`` ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' diff --git a/docs/tools/cli/service/schema-registry-acl.rst b/docs/tools/cli/service/schema-registry-acl.rst index d2f193dd7d..59ab4a3b16 100644 --- a/docs/tools/cli/service/schema-registry-acl.rst +++ b/docs/tools/cli/service/schema-registry-acl.rst @@ -12,9 +12,11 @@ Using the following commands you can manage :doc:`Karapace schema registry autho ``avn service schema-registry-acl-add`` ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' -You can add a Karapace schema registry ACL entry by using the command:: +You can add a Karapace schema registry ACL entry by using the command: - avn service schema-registry-acl-add +.. code:: + + avn service schema-registry-acl-add Where: @@ -42,19 +44,21 @@ The following example shows you how to add an ACL entry to grant a user (``user_ .. code:: - avn service schema-registry-acl-add kafka-doc \ - --username 'user_1' \ - --permission schema_registry_read \ - --resource 'Subject:s1' + avn service schema-registry-acl-add kafka-doc \ + --username 'user_1' \ + --permission schema_registry_read \ + --resource 'Subject:s1' .. Note:: You cannot edit a Karapace schema registry ACL entry. You need to create a new entry and delete the older entry. ``avn service schema-registry-acl-delete`` ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' -You can delete a Karapace schema registry ACL entry using the command:: +You can delete a Karapace schema registry ACL entry using the command: - avn service schema-registry-acl-delete +.. code:: + + avn service schema-registry-acl-delete Where: @@ -69,17 +73,21 @@ Where: * - ``acl_id`` - The ID of the Karapace schema registry ACL to delete -**Example** +**Example:** + The following example deletes the Karapace schema registry ACL with ID ``acl3604f96c74a`` on the Aiven for Apache Kafka® instance named ``kafka-doc``. + .. code:: - avn service schema-registry-acl-delete kafka-doc acl3604f96c74a + avn service schema-registry-acl-delete kafka-doc acl3604f96c74a ``avn service schema-registry-acl-list`` ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' -You can view a list of all Karapace schema registry ACL entries defined using the command:: +You can view a list of all Karapace schema registry ACL entries defined using the command: - avn service schema-registry-acl-list +.. code:: + + avn service schema-registry-acl-list Where: @@ -93,11 +101,12 @@ Where: - The name of the service **Example:** + The following example lists the ACLs defined for an Aiven for Apache Kafka® service named ``kafka-doc``. .. 
code:: - avn service schema-registry-acl-list kafka-doc + avn service schema-registry-acl-list kafka-doc The command output is: diff --git a/docs/tools/terraform/concepts.rst b/docs/tools/terraform/concepts.rst deleted file mode 100644 index 634677c9dc..0000000000 --- a/docs/tools/terraform/concepts.rst +++ /dev/null @@ -1,6 +0,0 @@ -:orphan: - -Understanding the Aiven Terraform provider -========================================== - -:doc:`Data sources in Terraform ` \ No newline at end of file diff --git a/docs/tools/terraform/get-started.rst b/docs/tools/terraform/get-started.rst index 812364dc8c..11e7c05577 100644 --- a/docs/tools/terraform/get-started.rst +++ b/docs/tools/terraform/get-started.rst @@ -25,76 +25,76 @@ Terraform files declare the structure of the infrastructure, the dependencies, a Set up the Terraform project in an empty folder: -1. Create a new Terraform file, ``provider.tf``, to declare a dependency on the Aiven Provider for Terraform. +#. Create a new Terraform file, ``provider.tf``, to declare a dependency on the Aiven Provider for Terraform. -Add the following code to the file and specify the version in the ``required_providers`` block. You can find the latest version on the `Aiven Provider page `_. - -.. code:: terraform - - terraform { - required_providers { - aiven = { - source = "aiven/aiven" - version = ">=4.0.0, < 5.0.0" + Add the following code to the file and specify the version in the ``required_providers`` block. You can find the latest version on the `Aiven Provider page `_. + + .. code:: terraform + + terraform { + required_providers { + aiven = { + source = "aiven/aiven" + version = ">=4.0.0, < 5.0.0" + } + } + } + + provider "aiven" { + api_token = var.aiven_api_token } - } - } - - provider "aiven" { - api_token = var.aiven_api_token - } - -3. Create a file named ``redis.tf``. - -Add the following code to define the configuration of a single-node Aiven for Redis®* service: - -.. code:: terraform - # Redis service - - resource "aiven_redis" "single-node-aiven-redis" { - project = var.project_name - cloud_name = "google-northamerica-northeast1" - plan = "startup-4" - service_name = "gcp-single-node-redis1" - maintenance_window_dow = "monday" - maintenance_window_time = "10:00:00" - - redis_user_config { - redis_maxmemory_policy = "allkeys-random" - - public_access { - redis = true +#. Create a file named ``redis.tf``. + + Add the following code to define the configuration of a single-node Aiven for Redis®* service: + + .. code:: terraform + + # Redis service + + resource "aiven_redis" "single-node-aiven-redis" { + project = var.project_name + cloud_name = "google-northamerica-northeast1" + plan = "startup-4" + service_name = "gcp-single-node-redis1" + maintenance_window_dow = "monday" + maintenance_window_time = "10:00:00" + + redis_user_config { + redis_maxmemory_policy = "allkeys-random" + + public_access { + redis = true + } } } - } -5. Create a file named ``variables.tf``. This is used to avoid including sensitive information in source control. +#. Create a file named ``variables.tf``. This is used to avoid including sensitive information in source control. -Add the following code to declare the API token and project name variables: + Add the following code to declare the API token and project name variables: -.. code:: terraform - - variable "aiven_api_token" { - description = "Aiven console API token" - type = string - } + .. 
code:: terraform - variable "project_name" { - description = "Aiven console project name" - type = string - } + variable "aiven_api_token" { + description = "Aiven console API token" + type = string + } + + variable "project_name" { + description = "Aiven console project name" + type = string + } -6. Create a file named ``terraform.tfvars`` to define the values of the sensitive information. - -Add the following code, replacing ``AIVEN_AUTHENTICATION_TOKEN`` with your API token and ``AIVEN_PROJECT_NAME`` with the name of your project: - -.. code:: terraform +#. Create a file named ``terraform.tfvars`` to define the values of the sensitive information. - aiven_api_token = "AIVEN_AUTHENTICATION_TOKEN" - project_name = "AIVEN_PROJECT_NAME" + Add the following code, replacing ``AIVEN_AUTHENTICATION_TOKEN`` with your API token and ``AIVEN_PROJECT_NAME`` with the name of your project: + + .. code:: terraform + + aiven_api_token = "AIVEN_AUTHENTICATION_TOKEN" + project_name = "AIVEN_PROJECT_NAME" .. _plan-and-apply: @@ -102,29 +102,29 @@ Add the following code, replacing ``AIVEN_AUTHENTICATION_TOKEN`` with your API t Plan and apply the configuration ''''''''''''''''''''''''''''''''' -1. The ``init`` command prepares the working directly for use with Terraform. Run this command to automatically find, download, and install the necessary Aiven Provider plugins: +#. The ``init`` command prepares the working directly for use with Terraform. Run this command to automatically find, download, and install the necessary Aiven Provider plugins: -.. code:: bash - - terraform init + .. code:: bash -2. Run the ``plan`` command to create an execution plan and preview the changes that will be made (for example, what resources will be created or modified): + terraform init -.. code:: bash +#. Run the ``plan`` command to create an execution plan and preview the changes that will be made (for example, what resources will be created or modified): - terraform plan - -3. To create the resources, run: + .. code:: bash + + terraform plan -.. code:: bash +#. To create the resources, run: - terraform apply --auto-approve + .. code:: bash + + terraform apply --auto-approve The output will be similar to the following: .. code:: bash - Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + Apply complete! Resources: 1 added, 0 changed, 0 destroyed. You can also see the new Redis service in the `Aiven Console `_. @@ -133,32 +133,33 @@ Clean up To delete the service and its data: -1. Create a destroy plan and preview the changes to your infrastructure with the following command: - -.. code:: bash - - terraform plan -destroy +#. Create a destroy plan and preview the changes to your infrastructure with the following command: -2. To delete the resources and all data, run: - -.. code:: bash - - terraform destroy + .. code:: bash + + terraform plan -destroy -3. Enter "yes" to confirm. The output will be similar to the following: +#. To delete the resources and all data, run: -.. code:: bash + .. code:: bash + + terraform destroy - Do you really want to destroy all resources? - Terraform will destroy all your managed infrastructure, as shown above. - There is no undo. Only 'yes' will be accepted to confirm. +#. Enter "yes" to confirm. The output will be similar to the following: - Enter a value: yes - ... - Destroy complete! Resources: 1 destroyed. + .. code:: bash + + Do you really want to destroy all resources? + Terraform will destroy all your managed infrastructure, as shown above. + There is no undo. 
Only 'yes' will be accepted to confirm. + + Enter a value: yes + ... + Destroy complete! Resources: 1 destroyed. Next steps ''''''''''' + * Try `another sample project `_ to set up integrated Aiven for Kafka®, PostgreSQL®, InfluxDB®, and Grafana® services. * Read the `Terraform Docs `_ to learn about more complex project structures. diff --git a/docs/tools/terraform/howto.rst b/docs/tools/terraform/howto.rst deleted file mode 100644 index 35c4c20372..0000000000 --- a/docs/tools/terraform/howto.rst +++ /dev/null @@ -1,6 +0,0 @@ -:orphan: - -HowTo -===== - -:doc:`Get started with Aiven Provider for Terraform ` \ No newline at end of file diff --git a/docs/tools/terraform/howto/config-postgresql-provider.rst b/docs/tools/terraform/howto/config-postgresql-provider.rst index 41f99be248..c1df7d9e5e 100644 --- a/docs/tools/terraform/howto/config-postgresql-provider.rst +++ b/docs/tools/terraform/howto/config-postgresql-provider.rst @@ -12,70 +12,71 @@ The new provider must be added to the ``required_providers`` block in the Terraf 1. This example shows how to add the PostgreSQL provider (source: ``cyrilgdn/postgresql``) along with the Aiven Terraform Provider (source: ``aiven/aiven``). -.. code:: terraform - - terraform { - required_providers { - aiven = { - source = "aiven/aiven" - version = ">=4.0.0, < 5.0.0" + .. code:: terraform + + terraform { + required_providers { + aiven = { + source = "aiven/aiven" + version = ">=4.0.0, < 5.0.0" + } + postgresql = { + source = "cyrilgdn/postgresql" + version = "1.16.0" + } } - postgresql = { - source = "cyrilgdn/postgresql" - version = "1.16.0" - } } - } 2. If the PostgreSQL provider is used on its own, you can provide the Aiven for PostgreSQL service connection details as follows: -.. code:: terraform - - provider "postgresql" { - host = "pg-serivicename-projectname.aivencloud.com" - port = 12691 - database = "defaultdb" - username = "avnadmin" - password = "postgres_password" - sslmode = "require" - connect_timeout = 15 - } - -Optionally, when the Aiven for PostgreSQL service is created within the same Terraform project, the values required to configure the PostgreSQL provider can be passed using references to the resource, as shown in the code below: + .. code:: terraform -.. code:: terraform - - resource "aiven_pg" "demo-pg" { - project = var.project_name - cloud_name = "google-asia-southeast1" - plan = "business-8" - service_name = "demo-pg" - termination_protection = true - } - - # PostgreSQL provider is configured with references to the aiven_pg.demo-pg resource. - - provider "postgresql" { - host = aiven_pg.demo-pg.service_host - port = aiven_pg.demo-pg.service_port - database = aiven_pg.demo-pg.pg.dbname - username = aiven_pg.demo-pg.service_username - password = aiven_pg.demo-pg.service_password - sslmode = "require" - connect_timeout = 15 - } + provider "postgresql" { + host = "pg-serivicename-projectname.aivencloud.com" + port = 12691 + database = "defaultdb" + username = "avnadmin" + password = "postgres_password" + sslmode = "require" + connect_timeout = 15 + } + Optionally, when the Aiven for PostgreSQL service is created within the same Terraform project, the values required to configure the PostgreSQL provider can be passed using references to the resource, as shown in the code below: + + .. 
code:: terraform + + resource "aiven_pg" "demo-pg" { + project = var.project_name + cloud_name = "google-asia-southeast1" + plan = "business-8" + service_name = "demo-pg" + termination_protection = true + } + + # PostgreSQL provider is configured with references to the aiven_pg.demo-pg resource. + + provider "postgresql" { + host = aiven_pg.demo-pg.service_host + port = aiven_pg.demo-pg.service_port + database = aiven_pg.demo-pg.pg.dbname + username = aiven_pg.demo-pg.service_username + password = aiven_pg.demo-pg.service_password + sslmode = "require" + connect_timeout = 15 + } + 3. Create a PostgreSQL role called ``test_role`` using the Terraform resource ``postgresql_role.my_role``. -.. code:: terraform - - resource "postgresql_role" "my_role" { - name = "test_role" - } - -.. note:: - - For the full documentation of the ``Aiven Terraform Provider`` refer to `Aiven provider documentation `_. - - For the full list of resources available in ``PostgreSQL provider`` refer to `PostgreSQL provider documentation `_. - + .. code:: terraform + + resource "postgresql_role" "my_role" { + name = "test_role" + } + + .. note:: + + For the full documentation of the ``Aiven Terraform Provider`` refer to `Aiven provider documentation `_. + + For the full list of resources available in ``PostgreSQL provider`` refer to `PostgreSQL provider documentation `_. + + \ No newline at end of file diff --git a/docs/tools/terraform/howto/terraform-logging.rst b/docs/tools/terraform/howto/terraform-logging.rst index 03dabf3d5b..16826a6898 100644 --- a/docs/tools/terraform/howto/terraform-logging.rst +++ b/docs/tools/terraform/howto/terraform-logging.rst @@ -44,7 +44,7 @@ If you are encountering issues when deploying your Aiven service via Terraform, 3. To generate an example of the core and provider logs,run: -.. code:: Shell + .. code:: Shell - terraform refresh + terraform refresh diff --git a/docs/tools/terraform/howto/update-deprecated-resources.rst b/docs/tools/terraform/howto/update-deprecated-resources.rst index 32c7053f02..1a46620370 100644 --- a/docs/tools/terraform/howto/update-deprecated-resources.rst +++ b/docs/tools/terraform/howto/update-deprecated-resources.rst @@ -9,21 +9,21 @@ Use the following steps to migrate from resources that have been deprecated or r In the following example, the ``aiven_database`` field is migrated to the new ``aiven_pg_database`` field for an Aiven for PostgreSQL® service. 1. Replace references to the deprecated field with the new field. In the following file ``aiven_database`` was replaced with ``aiven_pg_database``: - -.. code:: - - - resource "aiven_database" "mydatabase" { - project = aiven_project.myproject.project - service_name = aiven_pg.mypg.service_name - database_name = "" - } - - - + resource "aiven_pg_database" "mydatabase" { - project = aiven_project.myproject.project - service_name = aiven_pg.mypg.service_name - database_name = "" - } + + .. code:: + + - resource "aiven_database" "mydatabase" { + project = aiven_project.myproject.project + service_name = aiven_pg.mypg.service_name + database_name = "" + } + + + + resource "aiven_pg_database" "mydatabase" { + project = aiven_project.myproject.project + service_name = aiven_pg.mypg.service_name + database_name = "" + } 2. 
View a list of all resources in the state file: diff --git a/docs/tools/terraform/howto/upgrade-provider-v1-v2.rst b/docs/tools/terraform/howto/upgrade-provider-v1-v2.rst index e78ca23b4e..195ad50999 100644 --- a/docs/tools/terraform/howto/upgrade-provider-v1-v2.rst +++ b/docs/tools/terraform/howto/upgrade-provider-v1-v2.rst @@ -54,33 +54,32 @@ Upgrade Terraform 0.12 to 0.13 Between v0.12 and v0.13, the syntax of Terraform files changed. If you have the older syntax, follow these steps to get the updated syntax: - -1. Upgrade your modules first by installing Terraform v0.13.x (i.e. 0.13.7): -``tfenv install 0.13.7 && tfenv use 0.13.7`` and then using ``0.13upgrade`` tool. +1. Upgrade your modules first by installing Terraform v0.13.x (i.e. 0.13.7): ``tfenv install 0.13.7 && tfenv use 0.13.7`` and then using ``0.13upgrade`` tool. 2. Update ``required_version`` from ``>= 0.12`` to ``>= 0.13`` in the requirements block. 3. Update the existing state file, by running: -``terraform state replace-provider registry.terraform.io/-/aiven registry.terraform.io/aiven/aiven`` -you will replace old Aiven terraform provider references to the new format. + + ``terraform state replace-provider registry.terraform.io/-/aiven registry.terraform.io/aiven/aiven`` + you will replace old Aiven terraform provider references to the new format. 4. Run ``terraform 0.13upgrade`` to see any additional fixes recommended by HashiCorp. -If you are using more providers than Aiven provider you most likely need to upgrade them as well. -More information `here `_. + If you are using more providers than Aiven provider you most likely need to upgrade them as well. + More information `here `_. 5. Run ``terraform init -upgrade`` -.. image:: /images/tools/terraform/terraform-upgrade.jpg - :alt: Screenshot of the upgrade command in action - -You may see warnings or errors like the above, these will point towards -changes made between the release you are running and the latest release. - -The warnings will provide recommendations on the changes to make and you -can get more information using our -`docs `_. - -Now we can remove the old Terraform folder ``rm -rf ~/.terraform.d``. + .. image:: /images/tools/terraform/terraform-upgrade.jpg + :alt: Screenshot of the upgrade command in action + + You may see warnings or errors like the above, these will point towards + changes made between the release you are running and the latest release. + + The warnings will provide recommendations on the changes to make and you + can get more information using our + `docs `_. + + Now we can remove the old Terraform folder ``rm -rf ~/.terraform.d``. 6. As the last step run ``terraform plan`` diff --git a/docs/tools/terraform/howto/upgrade-provider-v2-v3.rst b/docs/tools/terraform/howto/upgrade-provider-v2-v3.rst index 4a7a24089a..04d529a478 100644 --- a/docs/tools/terraform/howto/upgrade-provider-v2-v3.rst +++ b/docs/tools/terraform/howto/upgrade-provider-v2-v3.rst @@ -60,9 +60,10 @@ To safely make this change you will: - Import already existing resource to the Terraform state. 1. To change from the old ``aiven_vpc_peering_connection`` to the new ``aiven_azure_vpc_peering_connection`` resource, -the resource type should be changed. -Any references to ``aiven_vpc_peering_connection.foo.*`` should be updated to instead read ``aiven_azure_vpc_peering_connection.foo.*`` instead. -Here's an example showing the update in action: + the resource type should be changed. 
+ Any references to ``aiven_vpc_peering_connection.foo.*`` should be updated to instead read ``aiven_azure_vpc_peering_connection.foo.*`` instead. + + Here's an example showing the update in action: .. code:: @@ -97,9 +98,9 @@ Here's an example showing the update in action: terraform state rm aiven_vpc_peering_connection.foo - .. tip:: + .. tip:: - Use the ``-dry-run`` flag to see this change before it is actually made + Use the ``-dry-run`` flag to see this change before it is actually made 4. Add the resource back to Terraform by importing it as a new resource with the new type: diff --git a/docs/tools/terraform/howto/upgrade-provider-v3-v4.rst b/docs/tools/terraform/howto/upgrade-provider-v3-v4.rst index 893220486c..4ae076c828 100644 --- a/docs/tools/terraform/howto/upgrade-provider-v3-v4.rst +++ b/docs/tools/terraform/howto/upgrade-provider-v3-v4.rst @@ -51,42 +51,53 @@ In this example, the ``aiven_database`` field is updated to the service-specific 1. Update ``aiven_database`` references to ``aiven_pg_database`` as in this example file: -.. code:: - - - resource "aiven_database" "mydatabase" { - project = aiven_project.myproject.project - service_name = aiven_pg.mypg.service_name - database_name = "" - } - - - + resource "aiven_pg_database" "mydatabase" { - project = aiven_project.myproject.project - service_name = aiven_pg.mypg.service_name - database_name = "" - } + .. code:: + + - resource "aiven_database" "mydatabase" { + project = aiven_project.myproject.project + service_name = aiven_pg.mypg.service_name + database_name = "" + } + + + + resource "aiven_pg_database" "mydatabase" { + project = aiven_project.myproject.project + service_name = aiven_pg.mypg.service_name + database_name = "" + } -2. View a list of all resources in the state file:: +2. View a list of all resources in the state file: - terraform state list + .. code:: -3. Remove the resource from the control of Terraform:: + terraform state list - terraform state rm aiven_database +3. Remove the resource from the control of Terraform: + + .. code:: + + terraform state rm aiven_database -.. tip:: - Use the ``-dry-run`` flag to preview the changes without applying them. - -4. Add the resource back to Terraform by importing it as a new resource:: + .. tip:: + + Use the ``-dry-run`` flag to preview the changes without applying them. - terraform import aiven_pg_database project_name/service_name/db_name +4. Add the resource back to Terraform by importing it as a new resource: -5. Check that the import is going to run as you expect:: + .. code:: + + terraform import aiven_pg_database project_name/service_name/db_name - terraform plan +5. Check that the import is going to run as you expect: + + .. code:: -6. Apply the new configuration:: + terraform plan - terraform apply +6. Apply the new configuration: + + .. code:: + + terraform apply You can follow these steps to update the other resources that were deprecated in version 3 of the provider. diff --git a/docs/tools/terraform/howto/upgrade-to-opensearch.rst b/docs/tools/terraform/howto/upgrade-to-opensearch.rst index cae42d0fb3..24fcb84ac1 100644 --- a/docs/tools/terraform/howto/upgrade-to-opensearch.rst +++ b/docs/tools/terraform/howto/upgrade-to-opensearch.rst @@ -12,71 +12,75 @@ To upgrade an existing Elasticsearch service to OpenSearch using Terraform: Use the following steps to complete the upgrade safely: 1. Change the ``elasticsearch_version = 7`` to ``opensearch_version = 1``. This is the equivalent to clicking the migrate button in the console. - -.. 
code-block:: - - # Existing Elasticsearch Resource - resource "aiven_elasticsearch" "es" { - project = "project-name" - cloud_name = "google-us-east4" - plan = "business-4" - service_name = "es" - - elasticsearch_user_config { - elasticsearch_version = 7 - } - } - -.. code-block:: - - # Modified Elasticsearch Resource, upgrades to OpenSearch v1 - resource "aiven_elasticsearch" "es" { - project = "project-name" - cloud_name = "google-us-east4" - plan = "business-4" - service_name = "es" - - elasticsearch_user_config { - opensearch_version = 1 - } - } - -Once you have updated your configuration, check that the change looks correct:: - - terraform plan - -Apply the upgrade:: - - terraform apply - -Your service will now upgrade to OpenSearch, and if you view it in the web console, it will show as an OpenSearch service. + + .. code-block:: + + # Existing Elasticsearch Resource + resource "aiven_elasticsearch" "es" { + project = "project-name" + cloud_name = "google-us-east4" + plan = "business-4" + service_name = "es" + + elasticsearch_user_config { + elasticsearch_version = 7 + } + } + + .. code-block:: + + # Modified Elasticsearch Resource, upgrades to OpenSearch v1 + resource "aiven_elasticsearch" "es" { + project = "project-name" + cloud_name = "google-us-east4" + plan = "business-4" + service_name = "es" + + elasticsearch_user_config { + opensearch_version = 1 + } + } + + Once you have updated your configuration, check that the change looks correct: + + .. code:: + + terraform plan + + Apply the upgrade: + + .. code:: + + terraform apply + + Your service will now upgrade to OpenSearch, and if you view it in the web console, it will show as an OpenSearch service. 2. After the migration you will need to remove the Elasticsearch service from the Terraform state. -.. code-block:: + .. code-block:: - terraform state rm 'aiven_elasticsearch.' + terraform state rm 'aiven_elasticsearch.' 3. Update the resource configuration to be an OpenSearch resource type, the example shown above would then look like this: -.. code-block:: + .. code-block:: - resource "aiven_opensearch" "os" { - project = "project-name" - cloud_name = "google-us-east4" - plan = "business-4" - service_name = "es" + resource "aiven_opensearch" "os" { + project = "project-name" + cloud_name = "google-us-east4" + plan = "business-4" + service_name = "es" - opensearch_user_config { - opensearch_version = 1 + opensearch_user_config { + opensearch_version = 1 + } } - } -Bring the Terraform state back in sync with your OpenSearch service by importing the service. - -.. code-block:: +4. Bring the Terraform state back in sync with your OpenSearch service by importing the service. - terraform import 'aiven_opensearch.os' / + .. code-block:: + + terraform import 'aiven_opensearch.os' / Your Elasticsearch service has been upgraded to OpenSearch with Terraform, and the resource configuration updated to use a resource type of OpenSearch. diff --git a/docs/tools/terraform/howto/vpc-peering-aws.rst b/docs/tools/terraform/howto/vpc-peering-aws.rst index 675bf43b47..217128a44b 100644 --- a/docs/tools/terraform/howto/vpc-peering-aws.rst +++ b/docs/tools/terraform/howto/vpc-peering-aws.rst @@ -12,7 +12,7 @@ Prerequisites: * Create an :doc:`Aiven authentication token `. -* `Install the AWS CLI `_. +* `Install the AWS CLI `_. * `Configure the AWS CLI `_. 
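As a quick sanity check of the AWS CLI prerequisites listed above, you can confirm that the CLI is installed and that its configured credentials resolve to the AWS account you intend to peer with. This is a small illustrative check, not part of the original how-to, and assumes the default AWS profile is the one you want to use.

.. code::

    # Confirm the AWS CLI is installed
    aws --version
    # Confirm the configured credentials resolve to the expected AWS account
    aws sts get-caller-identity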
diff --git a/docs/tools/terraform/reference.rst b/docs/tools/terraform/reference.rst deleted file mode 100644 index 059ad8ea3f..0000000000 --- a/docs/tools/terraform/reference.rst +++ /dev/null @@ -1,8 +0,0 @@ -:orphan: - -Reference -========= - -`Aiven Terraform Cookbook `_ - -:doc:`Troubleshooting ` \ No newline at end of file diff --git a/docs/tools/terraform/reference/troubleshooting/private-access-error.rst b/docs/tools/terraform/reference/troubleshooting/private-access-error.rst index c6b1ce955f..f40c586b99 100644 --- a/docs/tools/terraform/reference/troubleshooting/private-access-error.rst +++ b/docs/tools/terraform/reference/troubleshooting/private-access-error.rst @@ -3,7 +3,8 @@ Private access error when using VPC When trying to set the Terraform argument ``private_access`` you may encounter the following error message: -.. Error:: +.. code:: + Error: Private only access to service ports cannot be enabled for the service's network This error message is seen because the ``private_access`` argument is restricted to certain specific networks within Aiven's internal systems. diff --git a/docs/tutorials/anomaly-detection.rst b/docs/tutorials/anomaly-detection.rst index 64782eb3b7..f3856a2599 100644 --- a/docs/tutorials/anomaly-detection.rst +++ b/docs/tutorials/anomaly-detection.rst @@ -119,17 +119,15 @@ The :doc:`Aiven for Apache Kafka ` service is responsible Customise the Aiven for Apache Kafka service '''''''''''''''''''''''''''''''''''''''''''' -Now that your service is created, you need to customise its functionality. In the **Overview** tab of your freshly created service, you'll see a bunch of toggles and properties. Change these two: +Now that your service is created, you need to customise its functionality. On the **Service settings** page of your freshly created service, you'll see a bunch of toggles and properties. Change these two: 1. Enable the Apache Kafka REST APIs to manage and query via the Aiven Console. - Navigate to **Kafka REST API (Karapace)** > **Enable**. - + Navigate to **Service settings** page > **Service management** section > actions (**...**) menu > **Enable REST API (Karapace)**. 2. Enable the :doc:`automatic creation of Apache Kafka topics ` to create new Apache Kafka® topics on the fly while pushing a first record. - Navigate to **Advanced configuration** > **Add configuration option** > ``kafka.auto_create_topics_enable``, switch the setting on and then click **Save advanced configuration**. - + Navigate to **Service settings** page > **Advanced configuration** section > **Configure** > **Add configuration options** > ``kafka.auto_create_topics_enable``, enable the selected parameter using the toggle switch, and select **Save configuration**. Create an Aiven for PostgreSQL® service ''''''''''''''''''''''''''''''''''''''''' @@ -147,9 +145,9 @@ You can create the Aiven for PostgreSQL database with the following steps: 5. Select `Startup-4` as service plan. The `Startup-4` plan allows you to define the service integrations needed to define Apache Flink streaming transformations over the data in the PostgreSQL® table. -5. Enter ``demo-postgresql`` as name for your service. +6. Enter ``demo-postgresql`` as name for your service. -6. Click **Create service** under the summary on the right side of the console +7. Click **Create service** under the summary on the right side of the console Create an Aiven for Apache Flink service @@ -172,8 +170,6 @@ You can create the Aiven for Apache Flink service with the following steps: 6. 
Click **Create Service** under the summary on the right side of the console. - - Integrate Aiven for Apache Flink service with sources and sinks ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' @@ -182,26 +178,18 @@ After creating the service, you'll be redirected to the service details page. Ap * Aiven for Apache Kafka®, which contains the stream of IoT sensor readings. * Aiven for PostgreSQL®, which contains the alerting thresholds. -You can define the service integrations, in the Aiven for Apache Flink® **Overview** tab, with the following steps: - -1. Click **Get started** on the banner at the top of the **Overview** page. +You can define the service integrations, on the Aiven for Apache Flink® **Overview** page, with the following steps: - .. image:: /images/tutorials/anomaly-detection/flink-console-integration.png - :alt: Aiven for Apache Flink Overview tab, showing the **Get started** button - -2. Select **Aiven for Apache Kafka®** and then select the ``demo-kafka`` service. -3. Click **Integrate**. -4. Click the **+** icon under *Data Flow*. -5. Check the **Aiven for PostgreSQL** checkbox in the ``Aiven Data Services`` section. -6. Select **Aiven for PostgreSQL®** and then select the ``demo-postgresql`` service. -7. Click **Integrate**. +1. Select **Create data pipeline** in section **Create and manage your data streams with ease** at the top of the **Overview** page. +2. In the **Data Service Integrations** window, select the **Aiven for Apache Kafka** checkbox and, next, select the ``demo-kafka`` service. Select **Integrate**. +3. Back on the **Overview** page, in the **Data Flow** section, select the **+** icon. +4. In the **Data Service Integrations** window, select the **Aiven for PostgreSQL** checkbox and, next, select the ``demo-postgresql`` service. Select **Integrate**. Once the above steps are completed, your **Data Flow** section should be similar to the below: .. image:: /images/tutorials/anomaly-detection/flink-integrations-done.png :alt: Aiven for Apache Flink Overview tab, showing the Integrations to Aiven for Apache Kafka and Aiven for PostgreSQL - Set up the IoT metrics streaming dataset ---------------------------------------- @@ -240,9 +228,11 @@ It's time to start streaming the fake IoT data that you'll later process with wi .. Note:: You can also use other existing data, although the examples in this tutorial are based on the IoT sample data. -1. Clone the `Dockerized fake data producer for Aiven for Apache Kafka® `_ repository to your computer:: - - git clone https://github.com/aiven/fake-data-producer-for-apache-kafka-docker.git +1. Clone the `Dockerized fake data producer for Aiven for Apache Kafka® `_ repository to your computer: + + .. code:: + + git clone https://github.com/aiven/fake-data-producer-for-apache-kafka-docker.git #. Navigate in the to the ``fake-data-producer-for-apache-kafka-docker`` directory and copy the ``conf/env.conf.sample`` file to ``conf/env.conf``. @@ -344,7 +334,7 @@ If you feel brave, you can go ahead and try try yourself in the `Aiven Console < 5. In the **Add source tables** tab, create the source table (named ``CPU_IN``), pointing to the Apache Kafka® topic ``cpu_load_stats_real`` where the IoT sensor readings are stored by: - * Select ``Aiven for Apache Kafka - demo-kafka`` as `Integrated service` + * Select ``Aiven for Apache Kafka - demo-kafka`` as *Integrated service*. * Paste the following SQL: .. 
literalinclude:: /code/products/flink/basic_cpu-in_table.md @@ -366,7 +356,7 @@ If you feel brave, you can go ahead and try try yourself in the `Aiven Console < 7. Create the sink table (named ``CPU_OUT_FILTER``), pointing to a new Apache Kafka® topic named ``cpu_load_stats_real_filter`` where the readings exceeding the ``80%`` threshold will land, by: * Clicking on the **Add your first sink table**. - * Selecting ``Aiven for Apache Kafka - demo-kafka`` as `Integrated service`. + * Selecting ``Aiven for Apache Kafka - demo-kafka`` as *Integrated service*. * Pasting the following SQL: .. literalinclude:: /code/products/flink/basic_cpu-out-filter_table.md @@ -469,7 +459,7 @@ You can go ahead an try yourself to define the windowing pipeline. If, on the ot 6. Create the sink table (named ``CPU_OUT_AGG``) pointing to a new Apache Kafka® topic named ``cpu_agg_stats``, where the 30 second aggregated data will land, by: * Clicking on the **Add your first sink table**. - * Selecting ``Aiven for Apache Kafka - demo-kafka`` as `Integrated service`. + * Selecting ``Aiven for Apache Kafka - demo-kafka`` as *Integrated service*. * Pasting the following SQL: .. literalinclude:: /code/products/flink/windowed_cpu-out-agg_table.md @@ -494,7 +484,7 @@ When the application is running, you should start to see messages containing th Create a threshold table in PostgreSQL '''''''''''''''''''''''''''''''''''''' -You will use a PostgreSQL table to store the various IoT thresholds based on the `hostname`. The table will later be used by a Flink application to compare the average CPU usage with the thresholds and send the notifications to a Slack channel. +You will use a PostgreSQL table to store the various IoT thresholds based on the ``hostname``. The table will later be used by a Flink application to compare the average CPU usage with the thresholds and send the notifications to a Slack channel. You can create the thresholds table in the ``demo-postgresql`` service with the following steps: @@ -565,7 +555,7 @@ To create the notification data pipeline, you can go ahead an try yourself or fo 4. To create a source table ``CPU_IN_AGG`` pointing to the Apache Kafka topic ``cpu_agg_stats``: * Click on **Add your first source table**. - * Select ``Aiven for Apache Kafka - demo-kafka`` as `Integrated service`. + * Select ``Aiven for Apache Kafka - demo-kafka`` as *Integrated service*. * Paste the following SQL: .. literalinclude:: /code/products/flink/windowed_cpu-in-agg_table.md @@ -576,7 +566,7 @@ To create the notification data pipeline, you can go ahead an try yourself or fo 5. To create a source table ``CPU_THRESHOLDS`` pointing to the PostgreSQL table ``cpu_thresholds``: * Click on **Add new table**. - * Select ``Aiven for PostgreSQL - demo-postgresql`` as `Integrated service`. + * Select ``Aiven for PostgreSQL - demo-postgresql`` as *Integrated service*. * Paste the following SQL: .. 
literalinclude:: /code/products/flink/pgthresholds_source-thresholds_table.md diff --git a/images/community/challenge-trophy.svg b/images/community/challenge-trophy.svg deleted file mode 100644 index 89f149f329..0000000000 --- a/images/community/challenge-trophy.svg +++ /dev/null @@ -1 +0,0 @@ - diff --git a/images/icon-tick.png b/images/icon-tick.png deleted file mode 100644 index 28053771f7..0000000000 Binary files a/images/icon-tick.png and /dev/null differ diff --git a/images/icon-cassandra.svg b/images/icons/icon-cassandra.svg similarity index 100% rename from images/icon-cassandra.svg rename to images/icons/icon-cassandra.svg diff --git a/images/icon-clickhouse.svg b/images/icons/icon-clickhouse.svg similarity index 100% rename from images/icon-clickhouse.svg rename to images/icons/icon-clickhouse.svg diff --git a/images/icons/icon-dragonfly.svg b/images/icons/icon-dragonfly.svg new file mode 100644 index 0000000000..5e9318bacd --- /dev/null +++ b/images/icons/icon-dragonfly.svg @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/images/icon-elasticsearch.svg b/images/icons/icon-elasticsearch.svg similarity index 100% rename from images/icon-elasticsearch.svg rename to images/icons/icon-elasticsearch.svg diff --git a/images/icon-flink.svg b/images/icons/icon-flink.svg similarity index 100% rename from images/icon-flink.svg rename to images/icons/icon-flink.svg diff --git a/images/icon-grafana.svg b/images/icons/icon-grafana.svg similarity index 100% rename from images/icon-grafana.svg rename to images/icons/icon-grafana.svg diff --git a/images/icon-influxdb.svg b/images/icons/icon-influxdb.svg similarity index 100% rename from images/icon-influxdb.svg rename to images/icons/icon-influxdb.svg diff --git a/images/icon-kafka-connect.svg b/images/icons/icon-kafka-connect.svg similarity index 100% rename from images/icon-kafka-connect.svg rename to images/icons/icon-kafka-connect.svg diff --git a/images/icon-kafka-mirrormaker.svg b/images/icons/icon-kafka-mirrormaker.svg similarity index 100% rename from images/icon-kafka-mirrormaker.svg rename to images/icons/icon-kafka-mirrormaker.svg diff --git a/images/icon-kafka.svg b/images/icons/icon-kafka.svg similarity index 100% rename from images/icon-kafka.svg rename to images/icons/icon-kafka.svg diff --git a/images/icon-m3db.svg b/images/icons/icon-m3db.svg similarity index 100% rename from images/icon-m3db.svg rename to images/icons/icon-m3db.svg diff --git a/images/icon-mysql.svg b/images/icons/icon-mysql.svg similarity index 100% rename from images/icon-mysql.svg rename to images/icons/icon-mysql.svg diff --git a/images/icon-opensearch.png b/images/icons/icon-opensearch.png similarity index 100% rename from images/icon-opensearch.png rename to images/icons/icon-opensearch.png diff --git a/images/icon-pg.svg b/images/icons/icon-pg.svg similarity index 100% rename from images/icon-pg.svg rename to images/icons/icon-pg.svg diff --git a/images/icon-redis.svg b/images/icons/icon-redis.svg similarity index 100% rename from images/icon-redis.svg rename to images/icons/icon-redis.svg diff --git a/images/integrations/remote-syslog-service-integrations.png b/images/integrations/remote-syslog-service-integrations.png deleted file mode 100644 index 3c405a3635..0000000000 Binary files a/images/integrations/remote-syslog-service-integrations.png and /dev/null differ diff --git a/images/platform/backup-help-info.png b/images/platform/backup-help-info.png deleted file mode 100644 index 72e7fade52..0000000000 Binary files 
a/images/platform/backup-help-info.png and /dev/null differ diff --git a/images/platform/billing/billing_payment_options.png b/images/platform/billing/billing_payment_options.png deleted file mode 100644 index e88c3322c0..0000000000 Binary files a/images/platform/billing/billing_payment_options.png and /dev/null differ diff --git a/images/platform/billing/technical-emails.png b/images/platform/billing/technical-emails.png deleted file mode 100644 index 2d94030b23..0000000000 Binary files a/images/platform/billing/technical-emails.png and /dev/null differ diff --git a/images/platform/byoc-ipsec-ingress-direct.png b/images/platform/byoc-ipsec-ingress-direct.png deleted file mode 100644 index f654d32a56..0000000000 Binary files a/images/platform/byoc-ipsec-ingress-direct.png and /dev/null differ diff --git a/images/platform/byoc-ipsec-ingress.png b/images/platform/byoc-ipsec-ingress.png deleted file mode 100644 index cf3fc79337..0000000000 Binary files a/images/platform/byoc-ipsec-ingress.png and /dev/null differ diff --git a/images/platform/ca-download.png b/images/platform/ca-download.png deleted file mode 100644 index b175e82c91..0000000000 Binary files a/images/platform/ca-download.png and /dev/null differ diff --git a/images/platform/concepts/backup_location_preview.png b/images/platform/concepts/backup_location_preview.png deleted file mode 100644 index 07e0ed1bde..0000000000 Binary files a/images/platform/concepts/backup_location_preview.png and /dev/null differ diff --git a/images/platform/howto/add-addition-storage.png b/images/platform/howto/add-addition-storage.png deleted file mode 100644 index 78056ee899..0000000000 Binary files a/images/platform/howto/add-addition-storage.png and /dev/null differ diff --git a/images/platform/howto/upgrade-service-disk-space.png b/images/platform/howto/upgrade-service-disk-space.png deleted file mode 100644 index 932c84e189..0000000000 Binary files a/images/platform/howto/upgrade-service-disk-space.png and /dev/null differ diff --git a/images/platform/integrations/prometheus-advanced-configurations.png b/images/platform/integrations/prometheus-advanced-configurations.png deleted file mode 100644 index d32499f23c..0000000000 Binary files a/images/platform/integrations/prometheus-advanced-configurations.png and /dev/null differ diff --git a/images/platform/integrations/prometheus-endpoint-select.png b/images/platform/integrations/prometheus-endpoint-select.png deleted file mode 100644 index eeb89c5fcd..0000000000 Binary files a/images/platform/integrations/prometheus-endpoint-select.png and /dev/null differ diff --git a/images/platform/integrations/prometheus-service-info.png b/images/platform/integrations/prometheus-service-info.png deleted file mode 100644 index 2f39d4efb7..0000000000 Binary files a/images/platform/integrations/prometheus-service-info.png and /dev/null differ diff --git a/images/platform/power-off-confirmation.png b/images/platform/power-off-confirmation.png deleted file mode 100644 index f9213a8587..0000000000 Binary files a/images/platform/power-off-confirmation.png and /dev/null differ diff --git a/images/products/clickhouse/termination-prevention.png b/images/products/clickhouse/termination-prevention.png deleted file mode 100644 index e83bf03a16..0000000000 Binary files a/images/products/clickhouse/termination-prevention.png and /dev/null differ diff --git a/images/products/flink/console-overview.png b/images/products/flink/console-overview.png deleted file mode 100644 index d66fa1d416..0000000000 Binary files 
a/images/products/flink/console-overview.png and /dev/null differ diff --git a/images/products/flink/create-job.png b/images/products/flink/create-job.png deleted file mode 100644 index 20934f1678..0000000000 Binary files a/images/products/flink/create-job.png and /dev/null differ diff --git a/images/products/flink/create-table-pg.png b/images/products/flink/create-table-pg.png deleted file mode 100644 index 66a31adf9a..0000000000 Binary files a/images/products/flink/create-table-pg.png and /dev/null differ diff --git a/images/products/flink/create-table-topic.png b/images/products/flink/create-table-topic.png deleted file mode 100644 index 6e55edc7ba..0000000000 Binary files a/images/products/flink/create-table-topic.png and /dev/null differ diff --git a/images/products/grafana/enable-dashboard-previews.png b/images/products/grafana/enable-dashboard-previews.png deleted file mode 100644 index 5c30c7809a..0000000000 Binary files a/images/products/grafana/enable-dashboard-previews.png and /dev/null differ diff --git a/images/products/grafana/grafana-pitr-fork-restore.png b/images/products/grafana/grafana-pitr-fork-restore.png deleted file mode 100644 index 31b61ca2df..0000000000 Binary files a/images/products/grafana/grafana-pitr-fork-restore.png and /dev/null differ diff --git a/images/products/kafka/enable-sasl.png b/images/products/kafka/enable-sasl.png deleted file mode 100644 index 99bffed79a..0000000000 Binary files a/images/products/kafka/enable-sasl.png and /dev/null differ diff --git a/images/products/kafka/sasl-connect.png b/images/products/kafka/sasl-connect.png index e7a88a2b00..57cfebf28e 100644 Binary files a/images/products/kafka/sasl-connect.png and b/images/products/kafka/sasl-connect.png differ diff --git a/images/products/kafka/ssl-certificates-download.png b/images/products/kafka/ssl-certificates-download.png index b65ce871b2..2cc3fa1734 100644 Binary files a/images/products/kafka/ssl-certificates-download.png and b/images/products/kafka/ssl-certificates-download.png differ diff --git a/images/products/m3db/m3db-connection-details.png b/images/products/m3db/m3db-connection-details.png deleted file mode 100644 index db29a26a06..0000000000 Binary files a/images/products/m3db/m3db-connection-details.png and /dev/null differ diff --git a/images/products/m3db/telegraf-m3-example/m3_telegraph_01.png b/images/products/m3db/telegraf-m3-example/m3_telegraph_01.png deleted file mode 100644 index c24c45c572..0000000000 Binary files a/images/products/m3db/telegraf-m3-example/m3_telegraph_01.png and /dev/null differ diff --git a/images/products/m3db/telegraf-m3-example/m3_telegraph_02.png b/images/products/m3db/telegraf-m3-example/m3_telegraph_02.png deleted file mode 100644 index 57f83361fa..0000000000 Binary files a/images/products/m3db/telegraf-m3-example/m3_telegraph_02.png and /dev/null differ diff --git a/images/products/m3db/telegraf-m3-example/m3_telegraph_03.png b/images/products/m3db/telegraf-m3-example/m3_telegraph_03.png deleted file mode 100644 index 839355b4c7..0000000000 Binary files a/images/products/m3db/telegraf-m3-example/m3_telegraph_03.png and /dev/null differ diff --git a/images/products/m3db/telegraf-m3-example/m3_telegraph_04.png b/images/products/m3db/telegraf-m3-example/m3_telegraph_04.png deleted file mode 100644 index 4713240b48..0000000000 Binary files a/images/products/m3db/telegraf-m3-example/m3_telegraph_04.png and /dev/null differ diff --git a/images/products/m3db/telegraf-m3-example/m3_telegraph_05.png 
b/images/products/m3db/telegraf-m3-example/m3_telegraph_05.png deleted file mode 100644 index 8901e9af42..0000000000 Binary files a/images/products/m3db/telegraf-m3-example/m3_telegraph_05.png and /dev/null differ diff --git a/images/products/m3db/telegraf-m3-example/m3_telegraph_06.png b/images/products/m3db/telegraf-m3-example/m3_telegraph_06.png deleted file mode 100644 index 12f09ef12b..0000000000 Binary files a/images/products/m3db/telegraf-m3-example/m3_telegraph_06.png and /dev/null differ diff --git a/images/products/m3db/telegraf-m3-example/m3_telegraph_07.png b/images/products/m3db/telegraf-m3-example/m3_telegraph_07.png deleted file mode 100644 index 442f2473e1..0000000000 Binary files a/images/products/m3db/telegraf-m3-example/m3_telegraph_07.png and /dev/null differ diff --git a/images/products/m3db/telegraf-m3-example/m3_telegraph_08.png b/images/products/m3db/telegraf-m3-example/m3_telegraph_08.png deleted file mode 100644 index 35fa0ae621..0000000000 Binary files a/images/products/m3db/telegraf-m3-example/m3_telegraph_08.png and /dev/null differ diff --git a/images/products/m3db/telegraf-m3-example/m3_telegraph_09.png b/images/products/m3db/telegraf-m3-example/m3_telegraph_09.png deleted file mode 100644 index eb35c5c44d..0000000000 Binary files a/images/products/m3db/telegraf-m3-example/m3_telegraph_09.png and /dev/null differ diff --git a/images/products/m3db/telegraf-m3-example/m3_telegraph_10.png b/images/products/m3db/telegraf-m3-example/m3_telegraph_10.png deleted file mode 100644 index 3245b41ba2..0000000000 Binary files a/images/products/m3db/telegraf-m3-example/m3_telegraph_10.png and /dev/null differ diff --git a/images/products/mysql/mysql-service-overview.png b/images/products/mysql/mysql-service-overview.png deleted file mode 100644 index 7f06547297..0000000000 Binary files a/images/products/mysql/mysql-service-overview.png and /dev/null differ diff --git a/images/products/postgresql/connection-pool-details.png b/images/products/postgresql/connection-pool-details.png deleted file mode 100644 index 4717b73172..0000000000 Binary files a/images/products/postgresql/connection-pool-details.png and /dev/null differ diff --git a/images/products/postgresql/migrate-cloud.png b/images/products/postgresql/migrate-cloud.png deleted file mode 100644 index 9370a8a052..0000000000 Binary files a/images/products/postgresql/migrate-cloud.png and /dev/null differ diff --git a/images/products/postgresql/migrate-rebalancing.png b/images/products/postgresql/migrate-rebalancing.png deleted file mode 100644 index 8162fd4e39..0000000000 Binary files a/images/products/postgresql/migrate-rebalancing.png and /dev/null differ diff --git a/images/products/postgresql/migrate-running.png b/images/products/postgresql/migrate-running.png deleted file mode 100644 index da2f133830..0000000000 Binary files a/images/products/postgresql/migrate-running.png and /dev/null differ diff --git a/images/products/postgresql/pagila-load-sample-dataset.png b/images/products/postgresql/pagila-load-sample-dataset.png deleted file mode 100644 index dfd7937e00..0000000000 Binary files a/images/products/postgresql/pagila-load-sample-dataset.png and /dev/null differ diff --git a/images/products/postgresql/pg-connection-details.png b/images/products/postgresql/pg-connection-details.png deleted file mode 100644 index 6bf37c25ab..0000000000 Binary files a/images/products/postgresql/pg-connection-details.png and /dev/null differ diff --git a/images/products/postgresql/pg-long-running-queries.png 
b/images/products/postgresql/pg-long-running-queries.png deleted file mode 100644 index f282e600c6..0000000000 Binary files a/images/products/postgresql/pg-long-running-queries.png and /dev/null differ diff --git a/images/products/postgresql/replica-details.png b/images/products/postgresql/replica-details.png deleted file mode 100644 index 674882c000..0000000000 Binary files a/images/products/postgresql/replica-details.png and /dev/null differ diff --git a/images/products/redis/redis-acl.png b/images/products/redis/redis-acl.png deleted file mode 100644 index d59dad9ef4..0000000000 Binary files a/images/products/redis/redis-acl.png and /dev/null differ diff --git a/images/products/redis/redis-migration-in-progress.png b/images/products/redis/redis-migration-in-progress.png deleted file mode 100644 index 3b6a06eaab..0000000000 Binary files a/images/products/redis/redis-migration-in-progress.png and /dev/null differ diff --git a/images/products/redis/redis-migration-wizard.png b/images/products/redis/redis-migration-wizard.png deleted file mode 100644 index 5ac6c82b2f..0000000000 Binary files a/images/products/redis/redis-migration-wizard.png and /dev/null differ diff --git a/images/products/redis/redis-start-migration-2.png b/images/products/redis/redis-start-migration-2.png deleted file mode 100644 index cecbbcc7fd..0000000000 Binary files a/images/products/redis/redis-start-migration-2.png and /dev/null differ diff --git a/images/social_media/icon-blog.svg b/images/social_media/icon-blog.svg deleted file mode 100644 index 6ae83bf524..0000000000 --- a/images/social_media/icon-blog.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/images/social_media/icon-github.svg b/images/social_media/icon-github.svg deleted file mode 100644 index dacd4d2c6e..0000000000 --- a/images/social_media/icon-github.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/images/social_media/icon-twitter.svg b/images/social_media/icon-twitter.svg deleted file mode 100644 index e9614c64e3..0000000000 --- a/images/social_media/icon-twitter.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/images/social_media/icon-youtube.svg b/images/social_media/icon-youtube.svg deleted file mode 100644 index aa2cb76b16..0000000000 --- a/images/social_media/icon-youtube.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/images/tools/console/console_service.png b/images/tools/console/console_service.png deleted file mode 100644 index 977227cba2..0000000000 Binary files a/images/tools/console/console_service.png and /dev/null differ diff --git a/images/tutorials/anomaly-detection/auth-tokens.png b/images/tutorials/anomaly-detection/auth-tokens.png deleted file mode 100644 index 570fec3963..0000000000 Binary files a/images/tutorials/anomaly-detection/auth-tokens.png and /dev/null differ diff --git a/images/tutorials/anomaly-detection/flink-console-integration.png b/images/tutorials/anomaly-detection/flink-console-integration.png deleted file mode 100644 index 83d895622b..0000000000 Binary files a/images/tutorials/anomaly-detection/flink-console-integration.png and /dev/null differ diff --git a/includes/clouds-list.rst b/includes/clouds-list.rst index 3f27aaf6fd..d30d77a493 100644 --- a/includes/clouds-list.rst +++ b/includes/clouds-list.rst @@ -10,85 +10,85 @@ Amazon Web Services - Description * - Africa - ``aws-af-south-1`` - - Africa, South Africa + - Africa, South Africa: Cape Town * - Asia-Pacific - ``aws-ap-east-1`` - - Asia, Hong Kong + 
- Asia, Hong Kong: Hong Kong * - Asia-Pacific - ``aws-ap-northeast-1`` - - Asia, Japan + - Asia, Japan: Tokyo * - Asia-Pacific - ``aws-ap-northeast-2`` - - Asia, Korea + - Asia, Korea: Seoul * - Asia-Pacific - ``aws-ap-northeast-3`` - - Asia, Japan + - Asia, Japan: Osaka * - Asia-Pacific - ``aws-ap-south-1`` - - Asia, India + - Asia, India: Mumbai * - Asia-Pacific - ``aws-ap-south-2`` - - Asia, India + - Asia, India: Hyderabad * - Asia-Pacific - ``aws-ap-southeast-1`` - - Asia, Singapore + - Asia, Singapore: Singapore * - Asia-Pacific - ``aws-ap-southeast-3`` - - Asia, Jakarta + - Asia, Jakarta: Jakarta * - Australia - ``aws-ap-southeast-2`` - - Australia, New South Wales + - Australia, New South Wales: Sydney * - Australia - ``aws-ap-southeast-4`` - - Australia, Melbourne + - Australia, Melbourne: Melbourne * - Europe - ``aws-eu-central-1`` - - Europe, Germany + - Europe, Germany: Frankfurt * - Europe - ``aws-eu-central-2`` - - Europe, Switzerland + - Europe, Switzerland: Zurich * - Europe - ``aws-eu-north-1`` - - Europe, Sweden + - Europe, Sweden: Stockholm * - Europe - ``aws-eu-south-1`` - - Europe, Italy + - Europe, Italy: Milan * - Europe - ``aws-eu-south-2`` - - Europe, Spain + - Europe, Spain: Madrid * - Europe - ``aws-eu-west-1`` - - Europe, Ireland + - Europe, Ireland: Ireland * - Europe - ``aws-eu-west-2`` - - Europe, England + - Europe, England: London * - Europe - ``aws-eu-west-3`` - - Europe, France + - Europe, France: Paris * - Middle East - ``aws-me-central-1`` - - Middle East, UAE + - Middle East, UAE: UAE * - Middle East - ``aws-me-south-1`` - - Middle East, Bahrain + - Middle East, Bahrain: Bahrain * - North America - ``aws-ca-central-1`` - - Canada, Quebec + - Canada, Quebec: Canada Central * - North America - ``aws-us-east-1`` - - United States, Virginia + - United States, Virginia: N. Virginia * - North America - ``aws-us-east-2`` - - United States, Ohio + - United States, Ohio: Ohio * - North America - ``aws-us-west-1`` - - United States, California + - United States, California: N. 
California * - North America - ``aws-us-west-2`` - - United States, Oregon + - United States, Oregon: Oregon * - South America - ``aws-sa-east-1`` - - South America, Brazil + - South America, Brazil: São Paulo Azure ----------------------------------------------------- @@ -100,118 +100,118 @@ Azure - Description * - Africa - ``azure-south-africa-north`` - - Africa, South Africa + - Africa, South Africa: South Africa North * - Asia-Pacific - ``azure-eastasia`` - - Asia, Hong Kong + - Asia, Hong Kong: East Asia * - Asia-Pacific - ``azure-india-central`` - - Asia, India + - Asia, India: Central India * - Asia-Pacific - ``azure-india-south`` - - Asia, India + - Asia, India: South India * - Asia-Pacific - ``azure-india-west`` - - Asia, India + - Asia, India: West India * - Asia-Pacific - ``azure-japaneast`` - - Asia, Japan + - Asia, Japan: Japan East * - Asia-Pacific - ``azure-japanwest`` - - Asia, Japan + - Asia, Japan: Japan West * - Asia-Pacific - ``azure-korea-central`` - - Asia, Korea + - Asia, Korea: Korea Central * - Asia-Pacific - ``azure-korea-south`` - - Asia, Korea + - Asia, Korea: Korea South * - Asia-Pacific - ``azure-southeastasia`` - - Asia, Singapore + - Asia, Singapore: Southeast Asia * - Australia - ``azure-australia-central`` - - Australia, Canberra + - Australia, Canberra: Australia Central * - Australia - ``azure-australiaeast`` - - Australia, New South Wales + - Australia, New South Wales: Australia East * - Australia - ``azure-australiasoutheast`` - - Australia, Victoria + - Australia, Victoria: Australia Southeast * - Europe - ``azure-france-central`` - - Europe, France + - Europe, France: France Central * - Europe - ``azure-germany-north`` - - Europe, Germany + - Europe, Germany: Germany North * - Europe - ``azure-germany-westcentral`` - - Europe, Germany + - Europe, Germany: Germany West Central * - Europe - ``azure-northeurope`` - - Europe, Ireland + - Europe, Ireland: North Europe * - Europe - ``azure-norway-east`` - - Europe, Norway + - Europe, Norway: Norway East * - Europe - ``azure-norway-west`` - - Europe, Norway + - Europe, Norway: Norway West * - Europe - ``azure-sweden-central`` - - Europe, Gävle + - Europe, Gävle: Sweden Central * - Europe - ``azure-switzerland-north`` - - Europe, Switzerland + - Europe, Switzerland: Switzerland North * - Europe - ``azure-uksouth`` - - Europe, England + - Europe, England: UK South * - Europe - ``azure-ukwest`` - - Europe, Wales + - Europe, Wales: UK West * - Europe - ``azure-westeurope`` - - Europe, Netherlands + - Europe, Netherlands: West Europe * - Middle East - ``azure-qatar-central`` - - Middle East, Doha + - Middle East, Doha: Qatar Central * - Middle East - ``azure-uae-north`` - - Middle East, United Arab Emirates + - Middle East, United Arab Emirates: Middle East * - North America - ``azure-canadacentral`` - - Canada, Ontario + - Canada, Ontario: Canada Central * - North America - ``azure-canadaeast`` - - Canada, Quebec + - Canada, Quebec: Canada East * - North America - ``azure-centralus`` - - United States, Iowa + - United States, Iowa: Central US * - North America - ``azure-eastus`` - - United States, Virginia + - United States, Virginia: East US * - North America - ``azure-eastus2`` - - United States, Virginia + - United States, Virginia: East US 2 * - North America - ``azure-northcentralus`` - - United States, Illinois + - United States, Illinois: North Central US * - North America - ``azure-southcentralus`` - - United States, Texas + - United States, Texas: South Central US * - North America - 
``azure-westcentralus`` - - United States, Wyoming + - United States, Wyoming: West Central US * - North America - ``azure-westus`` - - United States, California + - United States, California: West US * - North America - ``azure-westus2`` - - United States, Washington + - United States, Washington: West US 2 * - North America - ``azure-westus3`` - - United States, Phoenix + - United States, Phoenix: West US 3 * - South America - ``azure-brazilsouth`` - - South America, Brazil + - South America, Brazil: Brazil South DigitalOcean ----------------------------------------------------- @@ -223,31 +223,31 @@ DigitalOcean - Description * - Asia-Pacific - ``do-blr`` - - Asia, India + - Asia, India: Bangalore * - Asia-Pacific - ``do-sgp`` - - Asia, Singapore + - Asia, Singapore: Singapore * - Australia - ``do-syd`` - - Australia, New South Wales + - Australia, New South Wales: Sydney * - Europe - ``do-ams`` - - Europe, Netherlands + - Europe, Netherlands: Amsterdam * - Europe - ``do-fra`` - - Europe, Germany + - Europe, Germany: Frankfurt * - Europe - ``do-lon`` - - Europe, England + - Europe, England: London * - North America - ``do-nyc`` - - United States, New York + - United States, New York: New York * - North America - ``do-sfo`` - - United States, California + - United States, California: San Francisco * - North America - ``do-tor`` - - Canada, Ontario + - Canada, Ontario: Toronto Google Cloud ----------------------------------------------------- @@ -259,109 +259,118 @@ Google Cloud - Description * - Asia-Pacific - ``google-asia-east1`` - - Asia, Taiwan + - Asia, Taiwan: Taiwan * - Asia-Pacific - ``google-asia-east2`` - - Asia, Hong Kong + - Asia, Hong Kong: Hong Kong * - Asia-Pacific - ``google-asia-northeast1`` - - Asia, Japan + - Asia, Japan: Tokyo * - Asia-Pacific - ``google-asia-northeast2`` - - Asia, Japan + - Asia, Japan: Osaka * - Asia-Pacific - ``google-asia-northeast3`` - - Asia, Korea + - Asia, Korea: Seoul * - Asia-Pacific - ``google-asia-south1`` - - Asia, India + - Asia, India: Mumbai * - Asia-Pacific - ``google-asia-south2`` - - Asia, India + - Asia, India: Delhi * - Asia-Pacific - ``google-asia-southeast1`` - - Asia, Singapore + - Asia, Singapore: Singapore * - Asia-Pacific - ``google-asia-southeast2`` - - Asia, Indonesia + - Asia, Indonesia: Jakarta * - Australia - ``google-australia-southeast1`` - - Australia, New South Wales + - Australia, New South Wales: Sydney * - Australia - ``google-australia-southeast2`` - - Australia, Victoria + - Australia, Victoria: Melbourne * - Europe - ``google-europe-central2`` - - Europe, Poland + - Europe, Poland: Warsaw * - Europe - ``google-europe-north1`` - - Europe, Finland + - Europe, Finland: Finland * - Europe - ``google-europe-southwest1`` - - Europe, Madrid + - Europe, Madrid: Spain * - Europe - ``google-europe-west1`` - - Europe, Belgium + - Europe, Belgium: Belgium * - Europe - ``google-europe-west2`` - - Europe, England + - Europe, England: London * - Europe - ``google-europe-west3`` - - Europe, Germany + - Europe, Germany: Frankfurt * - Europe - ``google-europe-west4`` - - Europe, Netherlands + - Europe, Netherlands: Netherlands * - Europe - ``google-europe-west6`` - - Europe, Switzerland + - Europe, Switzerland: Zürich * - Europe - ``google-europe-west8`` - - Europe, Italy + - Europe, Italy: Milan * - Europe - ``google-europe-west9`` - - Europe, France + - Europe, France: Paris + * - Europe + - ``google-europe-west12`` + - Europe, Italy: Turin + * - Middle East + - ``google-me-central1`` + - Middle East, Qatar: Doha + * - Middle 
East + - ``google-me-central2`` + - Middle East, Saudi Arabia: Dammam * - Middle East - ``google-me-west1`` - - Middle East, Israel + - Middle East, Israel: Tel Aviv * - North America - ``google-northamerica-northeast1`` - - Canada, Quebec + - Canada, Quebec: Montréal * - North America - ``google-northamerica-northeast2`` - - Canada, Ontario + - Canada, Ontario: Toronto * - North America - ``google-us-central1`` - - United States, Iowa + - United States, Iowa: Iowa * - North America - ``google-us-east1`` - - United States, South Carolina + - United States, South Carolina: South Carolina * - North America - ``google-us-east4`` - - United States, Virginia + - United States, Virginia: Northern Virginia * - North America - ``google-us-east5`` - - United States, Ohio + - United States, Ohio: Columbus * - North America - ``google-us-south1`` - - United States, Texas + - United States, Texas: Dallas * - North America - ``google-us-west1`` - - United States, Oregon + - United States, Oregon: Oregon * - North America - ``google-us-west2`` - - United States, California + - United States, California: Los Angeles * - North America - ``google-us-west3`` - - United States, Utah + - United States, Utah: Salt Lake City * - North America - ``google-us-west4`` - - United States, Nevada + - United States, Nevada: Las Vegas * - South America - ``google-southamerica-east1`` - - South America, Brazil + - South America, Brazil: Sao Paulo * - South America - ``google-southamerica-west1`` - - South America, Chile + - South America, Chile: Santiago UpCloud ----------------------------------------------------- @@ -373,40 +382,78 @@ UpCloud - Description * - Asia-Pacific - ``upcloud-sg-sin`` - - Asia, Singapore + - Asia, Singapore: Singapore * - Australia - ``upcloud-au-syd`` - - Australia, New South Wales + - Australia, New South Wales: Sydney * - Europe - ``upcloud-de-fra`` - - Europe, Germany + - Europe, Germany: Frankfurt * - Europe - ``upcloud-es-mad`` - - Europe, Spain + - Europe, Spain: Madrid * - Europe - ``upcloud-fi-hel`` - - Europe, Finland + - Europe, Finland: Helsinki * - Europe - ``upcloud-fi-hel1`` - - Europe, Finland + - Europe, Finland: Helsinki * - Europe - ``upcloud-fi-hel2`` - - Europe, Finland + - Europe, Finland: Helsinki * - Europe - ``upcloud-nl-ams`` - - Europe, Netherlands + - Europe, Netherlands: Amsterdam * - Europe - ``upcloud-pl-waw`` - - Europe, Poland + - Europe, Poland: Warsaw * - Europe - ``upcloud-se-sto`` - - Europe, Sweden + - Europe, Sweden: Stockholm * - North America - ``upcloud-us-chi`` - - United States, Illinois + - United States, Illinois: Chicago * - North America - ``upcloud-us-nyc`` - - United States, New York + - United States, New York: New York * - North America - ``upcloud-us-sjo`` - - United States, California \ No newline at end of file + - United States, California: San Jose + +Oracle Cloud Infrastructure +----------------------------------------------------- + +.. important:: + + Oracle Cloud Infrastructure (OCI) is supported on the Aiven platform as a :doc:`limited availability feature `. For more information or access, contact the sales team at sales@Aiven.io. + +.. 
list-table:: + :header-rows: 1 + + * - Region + - Cloud + - Description + * - Asia-Pacific + - ``ap-mumbai-1`` + - India, India West: Mumbai + * - Asia-Pacific + - ``ap-sydney-1`` + - Australia, Australia East: Sydney + * - Europe + - ``eu-frankfurt-1`` + - Germany, Germany Central: Frankfurt + * - Europe + - ``uk-london-1`` + - United Kingdom, UK South: London + * - Middle East + - ``me-dubai-1`` + - UAE, UAE East: Dubai + * - North America + - ``us-ashburn-1`` + - US East, Virginia: Ashburn + * - North America + - ``us-phoenix-1`` + - US West, Arizona: Phoenix + * - South America + - ``sa-saopaulo-1`` + - Brazil, Brazil East: São Paulo \ No newline at end of file diff --git a/includes/config-cassandra.rst b/includes/config-cassandra.rst index 3b938549d5..e38e6d1946 100644 --- a/includes/config-cassandra.rst +++ b/includes/config-cassandra.rst @@ -7,6 +7,14 @@ +``service_log`` +--------------- +*['boolean', 'null']* + +**Service logging** Store logs for the service so that they are available in the HTTP API and console. + + + ``static_ips`` -------------- *boolean* diff --git a/includes/config-clickhouse.rst b/includes/config-clickhouse.rst index 6009249852..64d6766a1b 100644 --- a/includes/config-clickhouse.rst +++ b/includes/config-clickhouse.rst @@ -7,6 +7,14 @@ +``service_log`` +--------------- +*['boolean', 'null']* + +**Service logging** Store logs for the service so that they are available in the HTTP API and console. + + + ``static_ips`` -------------- *boolean* diff --git a/includes/config-dragonfly.rst b/includes/config-dragonfly.rst new file mode 100644 index 0000000000..e69de29bb2 diff --git a/includes/config-flink.rst b/includes/config-flink.rst index ebc6c4c9c2..e9cde74aec 100644 --- a/includes/config-flink.rst +++ b/includes/config-flink.rst @@ -7,6 +7,14 @@ +``service_log`` +--------------- +*['boolean', 'null']* + +**Service logging** Store logs for the service so that they are available in the HTTP API and console. + + + ``static_ips`` -------------- *boolean* diff --git a/includes/config-grafana.rst b/includes/config-grafana.rst index efd8e9a596..50b1886625 100644 --- a/includes/config-grafana.rst +++ b/includes/config-grafana.rst @@ -1,3 +1,5 @@ + + ``custom_domain`` ----------------- *['string', 'null']* @@ -14,6 +16,14 @@ +``service_log`` +--------------- +*['boolean', 'null']* + +**Service logging** Store logs for the service so that they are available in the HTTP API and console. + + + ``static_ips`` -------------- *boolean* @@ -244,6 +254,12 @@ **Automatically sign-up users on successful sign-in** +``auto_login`` +~~~~~~~~~~~~~~ +*boolean* + +**Allow users to bypass the login screen and automatically log in** + ``client_id`` ~~~~~~~~~~~~~ *string* @@ -268,6 +284,12 @@ **Require users to belong to one of given organizations** +``skip_org_role_sync`` +~~~~~~~~~~~~~~~~~~~~~~ +*boolean* + +**Stop automatically syncing user roles** + ``auth_gitlab`` diff --git a/includes/config-influxdb.rst b/includes/config-influxdb.rst index 3c41fef68a..b67e75ebc5 100644 --- a/includes/config-influxdb.rst +++ b/includes/config-influxdb.rst @@ -1,11 +1,3 @@ -.. - ``additional_backup_regions`` - ----------------------------- - *array* - - **Additional Cloud Regions for Backup Replication** - - ``custom_domain`` ----------------- @@ -23,6 +15,14 @@ +``service_log`` +--------------- +*['boolean', 'null']* + +**Service logging** Store logs for the service so that they are available in the HTTP API and console. 
+ + + ``static_ips`` -------------- *boolean* diff --git a/includes/config-kafka.rst b/includes/config-kafka.rst index 3e318fdc8b..05cfe6d622 100644 --- a/includes/config-kafka.rst +++ b/includes/config-kafka.rst @@ -1,5 +1,4 @@ - ``custom_domain`` ----------------- *['string', 'null']* @@ -16,6 +15,14 @@ +``service_log`` +--------------- +*['boolean', 'null']* + +**Service logging** Store logs for the service so that they are available in the HTTP API and console. + + + ``static_ips`` -------------- *boolean* @@ -420,6 +427,12 @@ **transaction.remove.expired.transaction.cleanup.interval.ms** The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)). +``transaction_partition_verification_enable`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +*boolean* + +**transaction.partition.verification.enable** Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition + ``kafka_authentication_methods`` @@ -550,7 +563,7 @@ ~~~~~~~~~~~~~~~~~~~~~~ *integer* -**The timeout used to detect failures when using Kafka's group management facilities** The timeout in milliseconds used to detect failures when using Kafka's group management facilities (defaults to 10000). +**The timeout used to detect failures when using Kafka’s group management facilities** The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000). @@ -634,6 +647,12 @@ **consumer.request.timeout.ms** The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached +``name_strategy_validation`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +*boolean* + +**name.strategy.validation** If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. + ``simpleconsumer_pool_size_max`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ *integer* diff --git a/includes/config-kafka_connect.rst b/includes/config-kafka_connect.rst index 150889af93..30e00ebb11 100644 --- a/includes/config-kafka_connect.rst +++ b/includes/config-kafka_connect.rst @@ -1,6 +1,4 @@ - - ``ip_filter`` ------------- *array* @@ -9,6 +7,14 @@ +``service_log`` +--------------- +*['boolean', 'null']* + +**Service logging** Store logs for the service so that they are available in the HTTP API and console. + + + ``static_ips`` -------------- *boolean* @@ -117,7 +123,7 @@ ~~~~~~~~~~~~~~~~~~~~~~ *integer* -**The timeout used to detect failures when using Kafka's group management facilities** The timeout in milliseconds used to detect failures when using Kafka's group management facilities (defaults to 10000). +**The timeout used to detect failures when using Kafka’s group management facilities** The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000). diff --git a/includes/config-kafka_mirrormaker.rst b/includes/config-kafka_mirrormaker.rst index 2801afe2f9..7801964225 100644 --- a/includes/config-kafka_mirrormaker.rst +++ b/includes/config-kafka_mirrormaker.rst @@ -1,11 +1,3 @@ -.. - ``additional_backup_regions`` - ----------------------------- - *array* - - **Additional Cloud Regions for Backup Replication** - - ``ip_filter`` ------------- @@ -15,6 +7,14 @@ +``service_log`` +--------------- +*['boolean', 'null']* + +**Service logging** Store logs for the service so that they are available in the HTTP API and console. 
+ + + ``static_ips`` -------------- *boolean* @@ -89,5 +89,23 @@ **Maximum number of MirrorMaker tasks (of each type) per service CPU** 'tasks.max' is set to this multiplied by the number of CPUs in the service. +``offset_lag_max`` +~~~~~~~~~~~~~~~~~~ +*integer* + +**Maximum offset lag before it is resynced** How out-of-sync a remote partition can be before it is resynced. + +``groups`` +~~~~~~~~~~ +*string* + +**Comma-separated list of consumer groups to replicate** Consumer groups to replicate. Supports comma-separated group IDs and regexes. + +``groups_exclude`` +~~~~~~~~~~~~~~~~~~ +*string* + +**Comma-separated list of group IDs and regexes to exclude from replication** Exclude groups. Supports comma-separated group IDs and regexes. Excludes take precedence over includes. + diff --git a/includes/config-m3aggregator.rst b/includes/config-m3aggregator.rst index 98fb429cf6..399a14d66e 100644 --- a/includes/config-m3aggregator.rst +++ b/includes/config-m3aggregator.rst @@ -15,6 +15,14 @@ +``service_log`` +--------------- +*['boolean', 'null']* + +**Service logging** Store logs for the service so that they are available in the HTTP API and console. + + + ``static_ips`` -------------- *boolean* diff --git a/includes/config-m3db.rst b/includes/config-m3db.rst index cfc5c7697a..2619940a85 100644 --- a/includes/config-m3db.rst +++ b/includes/config-m3db.rst @@ -15,6 +15,14 @@ +``service_log`` +--------------- +*['boolean', 'null']* + +**Service logging** Store logs for the service so that they are available in the HTTP API and console. + + + ``static_ips`` -------------- *boolean* @@ -116,14 +124,6 @@ **Allow clients to connect to m3coordinator from the public internet for service nodes that are in a project VPC or another type of private network** -.. - ``additional_backup_regions`` - ----------------------------- - *array* - - **Additional Cloud Regions for Backup Replication** - - ``m3_version`` -------------- diff --git a/includes/config-mysql.rst b/includes/config-mysql.rst index c7721cb0c5..deb0c300eb 100644 --- a/includes/config-mysql.rst +++ b/includes/config-mysql.rst @@ -1,11 +1,3 @@ -.. - ``additional_backup_regions`` - ----------------------------- - *array* - - **Additional Cloud Regions for Backup Replication** - - ``ip_filter`` ------------- @@ -15,6 +7,14 @@ +``service_log`` +--------------- +*['boolean', 'null']* + +**Service logging** Store logs for the service so that they are available in the HTTP API and console. + + + ``static_ips`` -------------- *boolean* diff --git a/includes/config-opensearch.rst b/includes/config-opensearch.rst index 663f0717ab..07c418d0cb 100644 --- a/includes/config-opensearch.rst +++ b/includes/config-opensearch.rst @@ -34,6 +34,14 @@ +``service_log`` +--------------- +*['boolean', 'null']* + +**Service logging** Store logs for the service so that they are available in the HTTP API and console. + + + ``static_ips`` -------------- *boolean* @@ -114,13 +122,13 @@ ~~~~~~~~~~~~~ *['string', 'null']* -**The key in the JSON payload that stores the user's roles** The key in the JSON payload that stores the user's roles. The value of this key must be a comma-separated list of roles. Required only if you want to use roles in the JWT +**The key in the JSON payload that stores the user’s roles** The key in the JSON payload that stores the user’s roles. The value of this key must be a comma-separated list of roles. 
Required only if you want to use roles in the JWT ``subject_key`` ~~~~~~~~~~~~~~~ *['string', 'null']* -**The key in the JSON payload that stores the user's name** The key in the JSON payload that stores the user's name. If not defined, the subject registered claim is used. Most IdP providers use the preferred_username claim. Optional. +**The key in the JSON payload that stores the user’s name** The key in the JSON payload that stores the user’s name. If not defined, the subject registered claim is used. Most IdP providers use the preferred_username claim. Optional. ``jwt_header`` ~~~~~~~~~~~~~~ @@ -318,6 +326,12 @@ **Opensearch Security Plugin Settings** +``enable_security_audit`` +~~~~~~~~~~~~~~~~~~~~~~~~~ +*boolean* + +**Enable/Disable security audit** + ``thread_pool_search_size`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ *integer* @@ -416,55 +430,55 @@ ``email_sender_name`` ~~~~~~~~~~~~~~~~~~~~~ -*['string']* +*string* **Sender name placeholder to be used in Opensearch Dashboards and Opensearch keystore** This should be identical to the Sender name defined in Opensearch dashboards ``email_sender_username`` ~~~~~~~~~~~~~~~~~~~~~~~~~ -*['string']* +*string* **Sender username for Opensearch alerts** ``email_sender_password`` ~~~~~~~~~~~~~~~~~~~~~~~~~ -*['string']* +*string* **Sender password for Opensearch alerts to authenticate with SMTP server** Sender password for Opensearch alerts to authenticate with SMTP server ``ism_enabled`` ~~~~~~~~~~~~~~~ -*['boolean', 'null']* +*boolean* **Specifies whether ISM is enabled or not** ``ism_history_enabled`` ~~~~~~~~~~~~~~~~~~~~~~~ -*['boolean', 'null']* +*boolean* **Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document.** ``ism_history_max_age`` ~~~~~~~~~~~~~~~~~~~~~~~ -*['integer', 'null']* +*integer* **The maximum age before rolling over the audit history index in hours** ``ism_history_max_docs`` ~~~~~~~~~~~~~~~~~~~~~~~~ -*['integer', 'null']* +*integer* **The maximum number of documents before rolling over the audit history index.** ``ism_history_rollover_check_period`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -*['integer', 'null']* +*integer* **The time between rollover checks for the audit history index in hours.** ``ism_history_rollover_retention_period`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -*['integer', 'null']* +*integer* **How long audit history indices are kept in days.** diff --git a/includes/config-redis.rst b/includes/config-redis.rst index 9d1dce56db..7573e29add 100644 --- a/includes/config-redis.rst +++ b/includes/config-redis.rst @@ -1,9 +1,3 @@ -.. - ``additional_backup_regions`` - ----------------------------- - *array* - - **Additional Cloud Regions for Backup Replication** @@ -15,6 +9,14 @@ +``service_log`` +--------------- +*['boolean', 'null']* + +**Service logging** Store logs for the service so that they are available in the HTTP API and console. + + + ``static_ips`` -------------- *boolean* diff --git a/includes/services-memory-capped.rst b/includes/services-memory-capped.rst index 964cfd2dec..1423e4c336 100644 --- a/includes/services-memory-capped.rst +++ b/includes/services-memory-capped.rst @@ -9,10 +9,8 @@ For data services with unbounded memory allocation, a memory limit is placed on This **service memory** can be calculated as: - |service_memory| +|service_memory| .. important:: Reserved memory for non-service use is capped to a maximum of 4GB. For MySQL, a 600MB minimum is always guaranteed. 
- - diff --git a/index.rst b/index.rst index fd3034231c..1dad969650 100644 --- a/index.rst +++ b/index.rst @@ -86,6 +86,20 @@ Get started with Aiven's fully-managed services. Learn about ClickHouse + .. grid-item-card:: + :shadow: md + :margin: 2 2 0 0 + + |icon-dragonfly| **Dragonfly** + + Advanced, scalable in-memory data store for high-performance computing needs. + + .. button-link:: docs/products/dragonfly + :color: primary + :outline: + + Learn about Dragonfly + .. grid-item-card:: :shadow: md :margin: 2 2 0 0 @@ -239,7 +253,7 @@ Automation A public API you can use for programmatic integrations. - .. button-link:: docs/tools/api + .. button-link:: https://docs.aiven.io/docs/tools/api :color: primary :outline: diff --git a/requirements.txt b/requirements.txt index 660f517c3d..cf297ace4a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,4 +15,6 @@ sphinx-sitemap==2.2.0 sphinx-notfound-page==0.8 pygments==2.15.0 python-dotenv==0.21.0 -algoliasearch==3.0.0 \ No newline at end of file +algoliasearch==3.0.0 +natsort==8.4.0 +Jinja2==3.1.3 diff --git a/scripts/aiven/clouds.py b/scripts/aiven/clouds.py index b98f48d414..6a7a2a120a 100644 --- a/scripts/aiven/clouds.py +++ b/scripts/aiven/clouds.py @@ -1,24 +1,69 @@ -import requests import argparse -from typing import Dict +import jinja2 +import re +import requests +from dataclasses import dataclass +from typing import cast, Self +from natsort import natsorted + +CLOUD_ENTRIES_TEMPLATE = """\ +{% set state = namespace(prev_cloud_vendor_code=None) %} +{%- for cloud_entry in cloud_entries -%} +{% if cloud_entry.vendor_code != state.prev_cloud_vendor_code %} +{% set state.prev_cloud_vendor_code = cloud_entry.vendor_code %} +{{ cloud_entry.vendor_name }} +----------------------------------------------------- +.. list-table:: + :header-rows: 1 + * - Region + - Cloud + - Description +{%- endif %} + * - {{ cloud_entry.geo_region }} + - ``{{ cloud_entry.name }}`` + - {{ cloud_entry.description }} + {%- endfor -%} +""" -def create_cloud_entry(cloud: Dict) -> str: - """Creates cloud entry with formatted info. 
- :param cloud: contains relevant info about cloud - :returns: formatted string with cloud info - :rtype: str - """ - entry = "" - # Printing in title case to make it look better - entry += f' * - {cloud["geo_region"].title()}' - entry += "\n" - entry += f' - ``{cloud["cloud_name"]}``' - entry += "\n" - prefix = cloud["cloud_description"][0 : cloud["cloud_description"].find("-")] - entry += f" - {prefix}" - return entry +@dataclass +class CloudEntry: + description: str + geo_region: str + name: str + vendor_code: str + vendor_name: str + + @classmethod + def from_dict(cls: type[Self], cloud: dict[str, str | float], /) -> Self: + """Create cloud entry from dict + + :param cloud: contains relevant info about cloud + :rtype: CloudEntry + """ + + description_parts = [ + description_part.strip() + for description_part in re.split( + r"[,:-]", cast(str, cloud["cloud_description"]) + ) + ] + vendor_name = description_parts.pop(2) + description = ( + f"{description_parts[0]}, {description_parts[1]}: {description_parts[2]}" + ) + cloud_name = cast(str, cloud["cloud_name"]) + vendor_code = cloud_name[0 : cloud_name.index("-")] + return cls( + description=description, + geo_region=cast( + str, cloud["geo_region"] + ).title(), # Printing in title case to make it look better + name=cloud_name, + vendor_code=vendor_code, + vendor_name=vendor_name, + ) def main(): @@ -33,50 +78,12 @@ def main(): response = requests.get("https://api.aiven.io/v1/clouds") data = response.json()["clouds"] - # Sorting the data by vendor and region - # * Vendor is contained in the cloud_name field, between the start and the '-' symbol - # * geographical region is contained in the geo_region field - # * the cloud name itself is contained in the cloud_name field - data = sorted( - data, - key=lambda k: k["cloud_name"][0 : k["cloud_name"].find("-")] - + " " - + k["geo_region"] - + k["cloud_name"], + cloud_entries = natsorted( + (CloudEntry.from_dict(cloud) for cloud in data), + key=lambda cloud: (cloud.vendor_code, cloud.geo_region, cloud.name), ) - # This helps creating a new section every time there is a change in the Cloud vendor - prev_cloud = None - res = "" - for cloud in data: - # Extracting the cloud vendor information available in the cloud_description field between the `-` symbol and the `:` symbol - curr_cloud = cloud["cloud_description"][ - cloud["cloud_description"].find("-") - + 2 : cloud["cloud_description"].find(":") - ] - res += "\n" - # If current_cloud is different than the previous cloud, let's create a new title, section, table - if curr_cloud != prev_cloud: - prev_cloud = curr_cloud - res += "\n" - res += curr_cloud - res += "\n" - res += "-----------------------------------------------------" - res += "\n" - - res += ".. 
list-table::" - res += "\n" - res += " :header-rows: 1" - res += "\n\n" - - res += " * - Region" - res += "\n" - res += " - Cloud" - res += "\n" - res += " - Description" - res += "\n" - - res += create_cloud_entry(cloud) + res = jinja2.Template(CLOUD_ENTRIES_TEMPLATE).render(cloud_entries=cloud_entries) with open(filename, "w") as text_file: text_file.write(res) diff --git a/scripts/index_algolia.py b/scripts/index_algolia.py index c9b208d5f4..a683cc8049 100644 --- a/scripts/index_algolia.py +++ b/scripts/index_algolia.py @@ -44,6 +44,10 @@ def parse_pages(html_build_dir): for admonition in elements.select('div.admonition'): admonition.decompose() + # remove code block + for code in elements.select('div.highlight-shell.notranslate'): + code.decompose() + # remove tables of contents for toc in elements.select('div.toctree-wrapper'): toc.decompose()