diff --git a/.github/vale/dicts/aiven.dic b/.github/vale/dicts/aiven.dic index d7ce4f51ef..bf5df58e5d 100644 --- a/.github/vale/dicts/aiven.dic +++ b/.github/vale/dicts/aiven.dic @@ -23,6 +23,7 @@ boolean boot Bucardo business +BYOC Cassandra Centrify checkpointing @@ -107,6 +108,7 @@ Instana IdP IdPs io +iops IPsec iptables Java @@ -170,6 +172,7 @@ PgBouncer PGHoard pglookout pgoutput +pgvector plaintext plc PNG/S @@ -251,6 +254,7 @@ untrusted unaggregated UpCloud upsert +vectorizing VM VMs VPC/MS diff --git a/.github/vale/styles/Aiven/capitalization_headings.yml b/.github/vale/styles/Aiven/capitalization_headings.yml index 1a1a8bfaa6..0e2c76fd60 100644 --- a/.github/vale/styles/Aiven/capitalization_headings.yml +++ b/.github/vale/styles/Aiven/capitalization_headings.yml @@ -15,12 +15,13 @@ exceptions: - Aiven Console - Apache - AWS Transit Gateway + - AWS Marketplace - Auth0 - Azure - Azure Marketplace - Boot - Business - - BYOA + - BYOC - Cassandra - ClickHouse - CloudWatch @@ -94,6 +95,7 @@ exceptions: - Pagila - pgAdmin - PgBouncer + - pgvector - PostgreSQL - Postman - Premium diff --git a/Makefile b/Makefile index 11ed46428e..c5aa585964 100644 --- a/Makefile +++ b/Makefile @@ -21,6 +21,13 @@ help: %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) +# This overwrites the sitemap generated by sphinx_sitemap to remove +# index.html and the .html extension from the sitemap URLs. This prevents a +# redirect loop (an issue for search engines), since Cloudflare Pages redirects every .html URL to its extensionless parent +html: + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + python "$(SOURCEDIR)/scripts/postprocess_sitemap.py" + livehtml: sphinx-autobuild "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/_redirects b/_redirects index 1f256e2330..1f9c91ec2b 100644 --- a/_redirects +++ b/_redirects @@ -55,6 +55,7 @@ /docs/products/postgresql/reference/high-cpu-load-of-pgbouncer.html /docs/products/postgresql/troubleshooting/troubleshooting-connection-pooling.html /docs/products/clickhouse/concepts/databases-and-tables.html /docs/products/clickhouse/howto/manage-databases-tables.html /docs/products/clickhouse/howto/integrate-pg.html /docs/products/clickhouse/howto/integrate-postgresql.html +/docs/platform/concepts/byoa.html /docs/platform/concepts/byoc.html # Redirect from .index.html to specific page names for landing diff --git a/_static/css/aiven.css b/_static/css/aiven.css index 93a0dce362..d7f16120d7 100644 --- a/_static/css/aiven.css +++ b/_static/css/aiven.css @@ -20,26 +20,35 @@ src: url(../fonts/Inter-Medium.ttf); } -.sidebar-drawer { - border-right: none; +@font-face { + font-family: Poppins; + src: url(../fonts/Poppins-SemiBold.ttf); } h1, h2, h3 { color: var(--color-foreground-primary); - font-family: InterBold; + font-family: "Poppins",sans-serif; } p { color: var(--color-content-foreground); - font-family: Inter; + font-family: "Inter",sans-serif; } a { color: var(--color-link); - text-decoration-color: var(--color-link); - font-family: Inter; + font-family: "Inter",sans-serif; + text-decoration-color: var(--color-link--hover); +} + +a:hover { + color: var(--color-link--hover); +} + +a.muted-link { + text-decoration: underline; } .page { @@ -107,16 +116,6 @@ a { width: 140px; } -.topnavbar-button-primary { - color: var(--color-topnav-button-primary); - background: var(--color-topnav-button-primary-background); } - -.topnavbar-button-primary:hover { - color: var(--color-topnav-button-primary-hover); - background:
var(--color-topnav-button-primary-hover-background); -} - .topnavbar-button-secondary { color: var(--color-topnav-button-secondary); border-color: var(--color-topnav-button-secondary-border); @@ -133,22 +132,50 @@ a { width: 100%; max-width: max-content; padding: 8px 24px; - border: 1px solid; + border: 1px solid var(--color-topnav-button-secondary-border); border-radius: 2px; cursor: pointer; text-decoration: none !important; + font-weight: 600; } .topnavbar-button-large:hover { text-decoration: none !important; } +.sd-btn.sd-btn-primary, +.sd-btn.sd-btn-outline-primary { + border-radius: 2px; + border-color: var(--color-topnav-button-secondary-border) !important; +} + +.sd-btn.sd-btn-primary:hover, +.sd-btn.sd-btn-outline-primary:hover, +.sd-btn.sd-btn-outline-primary:focus { + color: var(--color-topnav-button-secondary) !important; + border-color: var(--color-topnav-button-secondary-hover-border) !important; + background-color: transparent !important; +} + +.topnavbar-button-primary { + color: var(--color-topnav-button-primary); + background: var(--color-topnav-button-primary-background); + border: 1px solid var(--color-topnav-button-primary-background); +} + +.topnavbar-button-primary:hover { + color: var(--color-topnav-button-primary-hover); + background: var(--color-topnav-button-primary-hover-background); + border: 1px solid var(--color-topnav-button-primary-hover-background); +} + .topnavbar-link { color: var(--color-topnav-link); text-decoration: none; } .topnavbar-link:hover { + color: var(--color-topnav-link); text-decoration: underline; text-decoration-color: var(--color-topnav-link); } @@ -193,10 +220,6 @@ a { top: var(--topnavbar-height); } -.sidebar-tree .current-page > .reference { - font-weight: 400; -} - .sidebar-tree label:hover { background: none; } @@ -213,25 +236,25 @@ a { } .sidebar-tree .reference:hover { - background: none; - border-left: 1px solid var(--color-link); + border-left: 1px solid var(--color-brand-primary); font-family: Inter; - color: var(--color-link); + color: var(--color-sidebar-link-text); + font-weight: 600; } -.sidebar-tree .current-page > .reference { +.sidebar-tree .current > .reference { background: none !important; - border-left: 1px solid var(--color-link); - color: var(--color-link) !important; - font-weight: 400 !important; } -.sidebar-tree .current > .reference { - background: none !important; +.sidebar-tree .current-page > .reference { + font-weight: 600; + background: var(--color-sidebar-background-hover) !important; + border-left: 1px solid var(--color-brand-primary); + color: var(--color-sidebar-link-text) !important; } .sidebar-tree .current-page > .reference:hover { - background: none !important; + background: var(--color-sidebar-background-hover); } .sidebar-tree > .reference, @@ -271,7 +294,11 @@ a { .sidebar-search:focus { color: var(--color-search-focused); border-color: var(--color-search-border-focused); - outline: 2px solid var(--color-search-container-outline-focused); + outline: none; +} + +.sidebar-search::placeholder { + color: var(--color-search-focused); } .sidebar-search-container::before { @@ -353,17 +380,6 @@ h3 > code > .pre { top: var(--topnavbar-height) !important; } -.toctree-wrapper .reference { - font-family: Inter; - color: var(--color-sidebar-link-text); - text-decoration-color: var(--color-sidebar-link-text); -} - -.toctree-wrapper .reference:hover { - color: var(--color-link); - text-decoration-color: var(--color-link); -} - .toctree-wrapper > ul > li.toctree-l1 { list-style: none !important; margin: 12px; 
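The new ``html`` target in the Makefile above invokes ``scripts/postprocess_sitemap.py``, which is not included in this diff. As a rough, hypothetical sketch only (the script name and its purpose come from the Makefile comment; the sitemap path and the exact rewrite rules below are assumptions), a post-processing step that strips ``index.html`` and the ``.html`` suffix from the URLs written by sphinx_sitemap could look like this::

    # Hypothetical sketch; the real scripts/postprocess_sitemap.py may differ.
    # Rewrites <loc> entries in the sitemap produced by sphinx_sitemap so URLs
    # do not end in index.html or .html, matching the extensionless URLs served
    # by Cloudflare Pages and avoiding the redirect loop described in the
    # Makefile comment.
    import re
    from pathlib import Path

    SITEMAP = Path("_build/html/sitemap.xml")  # assumed output location

    def clean(url: str) -> str:
        url = re.sub(r"index\.html$", "", url)  # .../index.html -> .../
        url = re.sub(r"\.html$", "", url)       # .../page.html  -> .../page
        return url

    xml = SITEMAP.read_text(encoding="utf-8")
    xml = re.sub(r"<loc>(.*?)</loc>", lambda m: "<loc>" + clean(m.group(1)) + "</loc>", xml)
    SITEMAP.write_text(xml, encoding="utf-8")

Similarly, the ``override_canonical`` extension registered in conf.py later in this diff is not shown here. Assuming its purpose is to keep the ``rel="canonical"`` link consistent with those extensionless URLs, a minimal local Sphinx extension along those lines might be the following (illustrative only; the ``html-page-context`` event and the ``pageurl`` context key are standard Sphinx, everything else is an assumption)::

    # Illustrative sketch of a local extension such as "override_canonical";
    # the real extension in the repository may work differently.
    def _strip_html_suffix(app, pagename, templatename, context, doctree):
        # "pageurl" is what the built-in layout template uses for the
        # <link rel="canonical"> tag when html_baseurl is set.
        pageurl = context.get("pageurl")
        if not pageurl:
            return
        if pageurl.endswith("index.html"):
            context["pageurl"] = pageurl[: -len("index.html")]
        elif pageurl.endswith(".html"):
            context["pageurl"] = pageurl[: -len(".html")]

    def setup(app):
        app.connect("html-page-context", _strip_html_suffix)
        return {"parallel_read_safe": True, "parallel_write_safe": True}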
diff --git a/_static/fonts/Poppins-SemiBold.ttf b/_static/fonts/Poppins-SemiBold.ttf new file mode 100644 index 0000000000..74c726e327 Binary files /dev/null and b/_static/fonts/Poppins-SemiBold.ttf differ diff --git a/_static/images/logoDarkPride.png b/_static/images/logoDarkPride.png new file mode 100644 index 0000000000..f192917967 Binary files /dev/null and b/_static/images/logoDarkPride.png differ diff --git a/_static/images/logoLightPride.png b/_static/images/logoLightPride.png new file mode 100644 index 0000000000..3543de29f1 Binary files /dev/null and b/_static/images/logoLightPride.png differ diff --git a/_toc.yml b/_toc.yml index 3ca667a17d..1e548ef26a 100644 --- a/_toc.yml +++ b/_toc.yml @@ -17,10 +17,10 @@ entries: - file: docs/platform/concepts/list-billing title: Billing entries: + - file: docs/platform/concepts/hourly-billing-model - file: docs/platform/concepts/tax-information - file: docs/platform/concepts/billing-groups - file: docs/platform/concepts/corporate-billing - - file: docs/platform/concepts/hourly-billing-model - file: docs/platform/concepts/beta_services - file: docs/platform/concepts/cloud-security - file: docs/platform/concepts/logs-metrics-alerts @@ -32,7 +32,7 @@ entries: - file: docs/platform/concepts/out-of-memory-conditions - file: docs/platform/concepts/static-ips - file: docs/platform/concepts/tls-ssl-certificates - - file: docs/platform/concepts/byoa + - file: docs/platform/concepts/byoc - file: docs/platform/concepts/dynamic-disk-sizing - file: docs/platform/concepts/enhanced-compliance-env - file: docs/platform/concepts/disaster-recovery-test-scenarios @@ -44,12 +44,15 @@ entries: - file: docs/platform/howto title: HowTo entries: + - file: docs/platform/howto/feature-preview - file: docs/platform/howto/list-user - title: User and authentication management + title: User and access management entries: - - file: docs/platform/howto/add-authentication-method + - file: docs/platform/howto/edit-user-profile - file: docs/platform/howto/change-your-email-address + - file: docs/platform/howto/add-authentication-method - file: docs/platform/howto/create_authentication_token + - file: docs/platform/howto/manage-org-users - file: docs/platform/howto/create_new_service_user - file: docs/tools/aiven-console/howto/create-manage-teams - file: docs/platform/howto/user-2fa @@ -78,6 +81,7 @@ entries: - file: docs/platform/howto/search-services title: Search for services - file: docs/platform/howto/access-service-logs + - file: docs/platform/howto/service-metrics - file: docs/platform/howto/prepare-for-high-load - file: docs/platform/howto/create-service-integration - file: docs/platform/howto/list-network @@ -114,12 +118,18 @@ entries: - file: docs/platform/howto/billing-assign-projects - file: docs/platform/howto/payment-issues-plan-upgrades - file: docs/platform/howto/custom-plans + - file: docs/platform/howto/billing-aws-marketplace-subscription + title: Set up AWS Marketplace + - file: docs/platform/howto/move-to-aws-marketplace-billing + title: Move to AWS Marketplace - file: docs/platform/howto/billing-google-cloud-platform-marketplace-subscription title: Set up Google Cloud Marketplace - file: docs/platform/howto/move-to-gcp-marketplace-billing title: Move to Google Cloud Marketplace - file: docs/platform/howto/billing-azure-marketplace-subscription title: Set up Azure Marketplace + - file: docs/platform/howto/move-to-azure-marketplace-billing + title: Move to Azure Marketplace - file: docs/platform/howto/list-saml title: SAML Authentication entries: @@ -221,6 
+231,7 @@ entries: title: Upgrade the Aiven Terraform Provider from v2 to v3 - file: docs/tools/terraform/howto/upgrade-provider-v3-v4 title: Upgrade the Aiven Terraform Provider from v3 to v4 + - file: docs/tools/terraform/howto/update-deprecated-resources - file: docs/tools/terraform/howto/config-postgresql-provider title: Use PostgreSQL provider alongside Aiven Terraform Provider - file: docs/tools/terraform/howto/promote-to-master-pg-rr @@ -228,6 +239,7 @@ entries: - file: docs/tools/terraform/howto/upgrade-to-opensearch - file: docs/tools/terraform/howto/vnet-peering-azure - file: docs/tools/terraform/howto/vpc-peering-gcp + - file: docs/tools/terraform/howto/vpc-peering-aws - file: docs/tools/terraform/concepts title: Concepts entries: @@ -297,6 +309,9 @@ entries: - file: docs/products/kafka/concepts/auth-types - file: docs/products/kafka/concepts/non-leader-for-partition - file: docs/products/kafka/concepts/configuration-backup + - file: docs/products/kafka/concepts/monitor-consumer-group + - file: docs/products/kafka/concepts/kafka-quotas + title: Quotas - file: docs/products/kafka/howto title: HowTo entries: @@ -348,6 +363,7 @@ entries: - file: docs/products/kafka/howto/prevent-full-disks - file: docs/products/kafka/howto/use-zookeeper - file: docs/products/kafka/howto/avoid-out-of-memory-error + - file: docs/products/kafka/howto/manage-quotas - file: docs/products/kafka/howto/list-integration title: Integrations @@ -993,7 +1009,7 @@ entries: - file: docs/products/opensearch/howto/opensearch-aggregations-and-nodejs title: Aggregation with NodeJS - file: docs/products/opensearch/howto/list-opensearch-security - title: Manage OpenSearch Security |beta| + title: Manage OpenSearch Security entries: - file: docs/products/opensearch/howto/enable-opensearch-security title: Enable OpenSearch Security management @@ -1070,6 +1086,8 @@ entries: title: TimescaleDB - file: docs/products/postgresql/concepts/upgrade-failover title: Upgrade and failover procedures + - file: docs/products/postgresql/concepts/pgvector + title: AI-powered search with pgvector - file: docs/products/postgresql/howto title: HowTo entries: @@ -1110,6 +1128,8 @@ entries: - file: docs/products/postgresql/howto/optimize-pg-slow-queries - file: docs/products/postgresql/howto/check-avoid-transaction-id-wraparound - file: docs/products/postgresql/howto/prevent-full-disk + - file: docs/products/postgresql/howto/use-pgvector + title: Enable and use pgvector - file: docs/products/postgresql/howto/list-replication-migration title: Migrate entries: diff --git a/conf.py b/conf.py index ffbe2f93a3..dc4be3bf20 100644 --- a/conf.py +++ b/conf.py @@ -14,13 +14,13 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. 
# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) +import sys +sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- project = 'Aiven Docs' -copyright = '2022, Aiven Team' +copyright = '2023, Aiven Team' author = 'Aiven Team' html_title = 'Aiven' @@ -38,6 +38,7 @@ 'sphinx_gitstamp', 'sphinxext.opengraph', 'notfound.extension', + 'override_canonical', ] # Not Found configuration @@ -98,81 +99,96 @@ "light_logo": "images/logoLight.png", "dark_logo": "images/logoDark.png", "light_css_variables": { - "color-brand-primary": "#c60443", - "color-brand-content": "#4a4b57", - "color-link": "#c60443", - "color-sidebar-link-text": "#4a4b57", - "color-sidebar-link-text--top-level": "#4a4b57", + "color-brand-primary": "#FF5200", + "color-brand-content": "#343745", + "color-link": "#007cc2", + "color-link--hover": "#016BB2", + "color-link-underline--hover": "#016BB2", + "color-sidebar-link-text": "#343745", + "color-sidebar-link-text--top-level": "#343745", "font-stack": "Inter, sans-serif", - "color-sidebar-brand-text": "#4a4b57", + "color-sidebar-brand-text": "#343745", + "color-sidebar-background-hover": "#F8F9FB", + "color-sidebar-item-background--hover": "#F8F9FB", "color-foreground-primary": "#333333", - "color-foreground-secondary": "#747481", - "color-foreground-muted": "#747481", - "color-foreground-border": "#e1e1e3", + "color-foreground-secondary": "#4f5366", + "color-foreground-muted": "#4f5366", + "color-foreground-border": "#EDEDF0", "color-background-primary": "#ffffff", "color-background-secondary": "#f7f7fa", - "color-content-foreground": "#747481", + "color-content-foreground": "#4f5366", "color-background-hover": "#c60443", - "color-background-border": "#e1e1e3", + "color-background-border": "#EDEDF0", "color-highlighted-background": "#1c1c2f", - "color-inline-code-background": "#747481", - "color-sidebar-background": "#f7f7fa", - "color-sidebar-background-border": "#e1e1e3", + "color-inline-code-background": "#4f5366", + "color-sidebar-background": "#FFFFFF", + "color-sidebar-background-border": "#EDEDF0", "color-sidebar-search-background": "#fff", - "sd-color-card-background": "#f7f7fa", - "sd-color-primary": "#4a4b57", + "color-card-border": "#E1E1E3", + "sd-color-card-background": "#fff", + "sd-color-primary": "#343745", + "sd-color-shadow": "none", "sidebar-tree-space-above": "8px", # Custom css variables "color-search": "#19191D", "color-search-focused": "#4A4B57", "color-search-border": "#B4B4BB", - "color-search-border-focused": "#0174BA", + "color-search-border-focused": "#4F5366", "color-search-container-outline-focused": "#B4E5FB", "color-search-background": "#FFFFFF", + "color-sidebar-search-icon": "#B4B4BB", "color-topnav-background": "#FFFFFF", "color-topnav-border": "#EDEEF3", - "color-topnav-link": "#E41A4A", + "color-topnav-link": "#1A1B22", "color-topnav-theme-toggle-border": "rgba(0, 0, 0, 0.1)", "color-topnav-button-primary": "#FFFFFF", "color-topnav-button-primary-hover": "#FFFFFF", - "color-topnav-button-primary-background": "#E41A4A", - "color-topnav-button-primary-hover-background": "#C60443", - "color-topnav-button-secondary": "#E41A4A", - "color-topnav-button-secondary-border": "#E41A4A", - "color-topnav-button-secondary-hover": "#E41A4A", - "color-topnav-button-secondary-hover-border": "#E41A4A", - "color-topnav-button-secondary-hover-background": "#FFF9FC" + "color-topnav-button-primary-background": "#FF5200", + "color-topnav-button-primary-hover-background": "#F04D00", + 
"color-topnav-button-secondary": "#1A1B22", + "color-topnav-button-secondary-border": "#B4B7C5", + "color-topnav-button-secondary-hover": "#1A1B22", + "color-topnav-button-secondary-hover-border": "#4F5366", + "color-topnav-button-secondary-hover-background": "transparent", + "color-toc-item-text--active": "#007cc2", + "color-highlight-on-target": "#F8F9FB", }, "dark_css_variables": { - "color-brand-primary": "#d2d2d6", + "color-brand-primary": "#FF5200", "color-brand-content": "#ffffff", - "color-link": "#d2d2d6", + "color-link": "#6DCDF8", + "color-link--hover": "#3DBBF5", + "color-link-underline--hover": "#3DBBF5", "font-stack": "Inter, sans-serif", - "color-sidebar-brand-text": "#d2d2d6", - "color-sidebar-link-text": "#d2d2d6", - "color-sidebar-link-text--top-level": "#d2d2d6", + "color-sidebar-brand-text": "#D2D2D6", + "color-sidebar-background-hover": "#161825", + "color-sidebar-item-background--hover": "#161825", + "color-sidebar-link-text": "#D2D2D6", + "color-sidebar-link-text--top-level": "#D2D2D6", "color-foreground-primary": "#ffffff", - "color-foreground-secondary": "#83839d", - "color-foreground-muted": "#747481", - "color-foreground-border": "#e1e1e3", + "color-foreground-secondary": "#D2D2D6", + "color-foreground-muted": "#D2D2D6", + "color-foreground-border": "#3A3A44", "color-background-primary": "#11111e", "color-background-secondary": "#1c1c2f", - "color-content-foreground": "#83839d", + "color-content-foreground": "#D2D2D6", "color-background-hover": "#ff3554", - "color-background-border": "#e1e1e3", + "color-background-border": "#3A3A44", "color-highlighted-background": "#1c1c2f", "color-inline-code-background": "#f7f7fa", - "color-sidebar-background": "#0b0b14", - "color-sidebar-background-border": "#e1e1e3", + "color-sidebar-background": "#11111E", + "color-sidebar-background-border": "#3A3A44", "color-sidebar-search-background": "#1c1c2f", "color-admonition-title-background--tip": "#00c85240", "color-admonition-title-background--note": "#00b0ff40", "color-admonition-title-background--warning": "#ff910040", "color-admonition-title-background--error": "#ff525240", - "sd-color-card-background": "#0b0b14", + "sd-color-card-background": "#161825", "sd-color-card-header": "#0b0b14", - "sd-color-primary": "#e1e1e3", + "sd-color-primary": "#ffffff", + "sd-color-shadow": "none", + "color-card-border": "#3A3A44", "sidebar-tree-space-above": "8px", # Custom css variables @@ -182,19 +198,22 @@ "color-search-border-focused": "#7FD1F7", "color-search-container-outline-focused": "#0174BA", "color-search-background": "#11111E", + "color-sidebar-search-icon": "#F7F7FA", "color-topnav-background": "#0B0B14", "color-topnav-border": "#3A3A44", "color-topnav-link": "#F7F7FA", "color-topnav-theme-toggle-border": "rgba(255, 255, 255, 0.1)", - "color-topnav-button-primary": "black", - "color-topnav-button-primary-hover": "black", - "color-topnav-button-primary-background": "#ffffff", - "color-topnav-button-primary-hover-background": "#EDEDF0", + "color-topnav-button-primary": "#FFFFFF", + "color-topnav-button-primary-hover": "#FFFFFF", + "color-topnav-button-primary-background": "#FF5200", + "color-topnav-button-primary-hover-background": "#F04D00", "color-topnav-button-secondary": "#f7f7fa", "color-topnav-button-secondary-border": "#f7f7fa", "color-topnav-button-secondary-hover": "#f7f7fa", "color-topnav-button-secondary-hover-border": "#f7f7fa", - "color-topnav-button-secondary-hover-background": "rgba(255, 255, 255, 0.1)" + "color-topnav-button-secondary-hover-background": 
"transparent", + "color-toc-item-text--active": "#6DCDF8", + "color-highlight-on-target": "#161825", }, "navigation_with_keys": True } diff --git a/docs/platform/concepts.rst b/docs/platform/concepts.rst index eacee5445d..63b38c4d56 100644 --- a/docs/platform/concepts.rst +++ b/docs/platform/concepts.rst @@ -51,9 +51,9 @@ Learn about some of the key concepts for working with Aiven platform: Add (and remove) storage on the fly without upgrading your plan. -* :doc:`Bring your own account (BYOA) `. +* :doc:`Bring your own cloud (BYOC) `. - BYOA is an optional setup feature that allows you to purchase your Aiven services through your existing Cloud Provider. + BYOC is an optional setup feature that allows you to purchase your Aiven services through your existing Cloud Provider. * :doc:`Enhanced Compliance Environments (ECE) `. diff --git a/docs/platform/concepts/aiven-node-firewall-configuration.rst b/docs/platform/concepts/aiven-node-firewall-configuration.rst index 08ca2a0ba0..a640e526b9 100644 --- a/docs/platform/concepts/aiven-node-firewall-configuration.rst +++ b/docs/platform/concepts/aiven-node-firewall-configuration.rst @@ -6,7 +6,7 @@ The iptables configuration is generated dynamically at runtime depending on serv Intra-node connections are limited to point-to-point connections to specific IP addresses. All traffic to ports that are not required for the service to function is rejected instead of dropped to avoid timeouts. Service ports that you can connect to depend on the service type and deployment type. The configuration can also affect the ports that are available: -* Is the service in a public network, :doc:`dedicated VPC `, virtual cloud account, or a :doc:`Bring Your Own Account (BYOA) ` setup ? +* Is the service in a public network, :doc:`dedicated VPC `, virtual cloud account, or a :doc:`Bring Your Own Cloud (BYOC) ` setup ? * Have you configured IP ranges in  user_config.ip_filter? * Have you :doc:`enabled public internet access for services in a VPC `? @@ -45,6 +45,6 @@ Enhanced compliance environments -------------------------------- In `Enhanced Compliance Environments (ECE) `_, there is additional filtering at VPC level and a SOCKS5 proxy. ECE environments have more variable configurations because we provide more flexibility for configuring these to meet your requirements. Typically, ECE nodes are accessible only over VPC connections and are not exposed to the internet. This results in layered firewalls with cloud-provider SDN firewalls and individual node-specific iptables rules. -BYOA environments +BYOC environments ----------------- -With the BYOA deployment model, you deploy Aiven services under your own cloud accounts. This gives you greater control over deployment configuration, but the VM-level firewall configurations are set at deployment time according to Aiven base configurations. You can apply additional firewalls using your cloud service provider's configuration options. +With the BYOC deployment model, you deploy Aiven services under your own cloud accounts. This gives you greater control over deployment configuration, but the VM-level firewall configurations are set at deployment time according to Aiven base configurations. You can apply additional firewalls using your cloud service provider's configuration options. 
diff --git a/docs/platform/concepts/byoa.rst b/docs/platform/concepts/byoc.rst similarity index 70% rename from docs/platform/concepts/byoa.rst rename to docs/platform/concepts/byoc.rst index e64f68ede3..9bb99b7d1c 100644 --- a/docs/platform/concepts/byoa.rst +++ b/docs/platform/concepts/byoc.rst @@ -1,5 +1,5 @@ -Bring your own account (BYOA) -============================= +Bring your own cloud (BYOC) +=========================== Aiven services are usually deployed on Aiven managed infrastructure, using Aiven managed security protocols, and backed by Aiven managed storage and backups. @@ -7,23 +7,24 @@ This provides the most seamless, straight forward, and de-risked approach to dep Aiven services. However, there are cases where this approach is not appropriate, such as the need to achieve strict regulatory compliance. -In cases like these, Aiven offers customers the ability to instead BYOA (Bring -Your Own Account). BYOA allows customers to manage their own infrastructure, +In cases like these, Aiven offers customers the ability to instead BYOC (Bring +Your Own Cloud). BYOC allows customers to manage their own infrastructure, their own security posture and keep their data in their own cloud. -When to consider bringing your own account ------------------------------------------- +When to consider bringing your own cloud +---------------------------------------- -There are three major reasons to utilize BYOA: +There are three major reasons to utilize BYOC: -1. **Compliance**: Aiven offers managed environments for several standard compliance regulations such as HIPAA, PCI DSS and GDPR. However, if you have strict regulatory requirements, or special compliance requirements, BYOA may be the best option for you. -2. **Network auditing**: If you require visibility of all traffic within any VPC you operate in or need frequent auditing capabilities, BYOA is potentially a good fit. BYOA gives you the ability to audit network metadata but not the actual contents. -3. **Fine grained network control**: BYOA requires only some specific network access (e.g. service management and troubleshooting), otherwise allowing you to customize your network to meet any internal requirements or requirements of your customers. +1. **Compliance**: Aiven offers managed environments for several standard compliance regulations such as HIPAA, PCI DSS and GDPR. However, if you have strict regulatory requirements, or special compliance requirements, BYOC may be the best option for you. +2. **Network auditing**: If you require visibility of all traffic within any VPC you operate in or need frequent auditing capabilities, BYOC is potentially a good fit. BYOC gives you the ability to audit network metadata but not the actual contents. +3. **Fine grained network control**: BYOC requires only some specific network access (e.g. service management and troubleshooting), otherwise allowing you to customize your network to meet any internal requirements or requirements of your customers. +4. **Cost optimization**: Depending on your cloud provider, with Aiven BYOC you can use reserved instances, cost savings plans, committed use discounts, or other strategies to save on compute and storage infrastructure costs related to Aiven services. Who is eligible? ---------------- -The BYOA setup is a bespoke service offered on a case-by-case basis, and not +The BYOC setup is a bespoke service offered on a case-by-case basis, and not all cloud providers support it yet. 
Therefore customers must meet the following requirements: @@ -36,18 +37,17 @@ requirements: When to use a standard Aiven deployment --------------------------------------- -BYOA deployments are not automated and they add additional complexity for communicating +BYOC deployments are not automated and they add additional complexity for communicating to the Aiven control plane, service management, key management and security. In most cases customers can meet their regulatory and business requirements by utilizing -a standard Aiven deployment or :doc:`Enhanced Compliance Environment `. In fact, 99% of Aiven -customers are able to meet their requirements without BYOA. If you would like to understand -BYOA better or are unsure which deployment model is the best fit for you, please contact our sales department Sales@Aiven.io. +a standard Aiven deployment or :doc:`Enhanced Compliance Environment `. If you would like to understand +BYOC better or are unsure which deployment model is the best fit for you, please contact our sales department Sales@Aiven.io. Pricing and billing ------------------- -Unlike Aiven's standard all-inclusive pricing, the BYOA setup has custom +Unlike Aiven's standard all-inclusive pricing, the BYOC setup has custom pricing depending on the nature of your requirements. Customers entering this arrangement are responsible for all cloud infrastructure and network traffic charges. @@ -56,18 +56,18 @@ charges. For a cost estimate and analysis, please contact Sales@Aiven.io. -Architecture of BYOA deployments +Architecture of BYOC deployments -------------------------------- -With BYOA, you can use any standard Aiven method (e.g. :doc:`CLI `, :doc:`Terraform `) to manage your services and generally have the same user experience as with the regular Aiven deployment model. +With BYOC, you can use any standard Aiven method (e.g. :doc:`CLI `, :doc:`Terraform `) to manage your services and generally have the same user experience as with the regular Aiven deployment model. -BYOA standard +BYOC standard ''''''''''''' -.. image:: /images/platform/byoa-standard.png +.. image:: /images/platform/byoc-standard.png :alt: Overview architecture diagram with VPC set up -A standard BYOA deployment requires the customer to create a Virtual Private Cloud (VPC) +A standard BYOC deployment requires the customer to create a Virtual Private Cloud (VPC) dedicated to Aiven services within each region they want to operate. Aiven will access these VPCs via a static IP address and then route traffic through a proxy for additional security. In order to accomplish this, Aiven will utilize a bastion host, logically separated from the @@ -78,28 +78,28 @@ host and the service nodes will reside in a customer managed VPC, they will not Depending on the service being used, Aiven will take regular backups to enable forking, Point in Time Recovery (PITR) and disaster recovery. These backups by default will not -reside in the customer’s cloud account. If there is a requirement to have all backups -in your own account we can do this as well. Aiven will need object storage and permissions +reside in the customer’s cloud. If there is a requirement to have all backups +in your own cloud, we can do this as well. Aiven will need object storage and permissions to read and write in order to accomplish this. Please bear in mind that all backups are encrypted using Aiven managed keys and that the customer will be responsible for managing object storage configurations. 
-BYOA with IPsec ingress +BYOC with IPsec ingress ''''''''''''''''''''''' -.. image:: /images/platform/byoa-ipsec-ingress.png +.. image:: /images/platform/byoc-ipsec-ingress.png :alt: Overview architecture diagram with IPsec tunnel -A slight variation on a standard BYOA deployment enables Aiven to manage a customer's +A slight variation on a standard BYOC deployment enables Aiven to manage a customer's services through an IPsec tunnel. This deployment can be beneficial if management over the public internet is infeasible or adds additional complexity. -BYOA with direct IPsec ingress +BYOC with direct IPsec ingress '''''''''''''''''''''''''''''' -.. image:: /images/platform/byoa-ipsec-ingress-direct.png +.. image:: /images/platform/byoc-ipsec-ingress-direct.png :alt: Overview architecture diagram with direct IPsec access -Again a slight variation on a standard BYOA deployment enables Aiven to manage a customer's +Again a slight variation on a standard BYOC deployment enables Aiven to manage a customer's services through a direct IPsec tunnel. This deployment can be beneficial if there is a desire to reduce the number of Aiven managed components. diff --git a/docs/platform/concepts/cloud-security.rst b/docs/platform/concepts/cloud-security.rst index df3fe339f1..c2b8a370c1 100644 --- a/docs/platform/concepts/cloud-security.rst +++ b/docs/platform/concepts/cloud-security.rst @@ -89,3 +89,9 @@ The SBOM is a list of all packages that are being used by Aiven in the services SBOM reports are being widely adopted and may eventually be required for compliance or security assessments. We provide these reports as a file download via our :doc:`CLI `, in CSV or SPDX format. SBOM reports are only available to customers who have an enterprise support contract and all services within the project must have the latest maintenance patches applied. + + +Time synchronization +-------------------- + +All Aiven backend and customer services are configured to use trusted NTP (Network Time Protocol) servers of the respective cloud provider where each service is deployed. diff --git a/docs/platform/concepts/corporate-billing.rst b/docs/platform/concepts/corporate-billing.rst index df5b186b89..1f76e56262 100644 --- a/docs/platform/concepts/corporate-billing.rst +++ b/docs/platform/concepts/corporate-billing.rst @@ -8,7 +8,7 @@ Credit card payments The default billing method for all new Aiven customers is credit card. All costs accrued over a calendar month are charged on the first day of the following month. -Services are billed by the hour. The costs are automatically calculated based on the services running in a project. Each project is charged separately. +Services are billed by the hour. The costs are automatically calculated based on the services running in a project. Each project is charged separately, but the charges for multiple projects can be consolidated by assigning them to a :doc:`billing group `. Likewise, :doc:`organizations ` can have multiple billing groups. Credit card fees """""""""""""""""" diff --git a/docs/platform/concepts/enhanced-compliance-env.rst b/docs/platform/concepts/enhanced-compliance-env.rst index 6ec1fd5c69..1cb3007547 100644 --- a/docs/platform/concepts/enhanced-compliance-env.rst +++ b/docs/platform/concepts/enhanced-compliance-env.rst @@ -67,7 +67,7 @@ Although not exhaustive, Aiven is capable of supporting both the Health Insuranc Accountability Act (HIPAA) and the Payment Card Industry Data Security Standard (PCI DSS) compliances. 
If you require compliance beyond these please contact our sales department so we can better understand your specific needs. Additionally, we also offer an alternative deployment -option -- :doc:`Bring Your Own Account (BYOA) `. +option -- :doc:`Bring Your Own Cloud (BYOC) `. Migrating ---------------- diff --git a/docs/platform/concepts/hourly-billing-model.rst b/docs/platform/concepts/hourly-billing-model.rst index 644032d10c..d531fa785e 100644 --- a/docs/platform/concepts/hourly-billing-model.rst +++ b/docs/platform/concepts/hourly-billing-model.rst @@ -1,7 +1,13 @@ -Hourly billing model for all services -===================================== +Billing overview +================= -The prices as shown in the Aiven console are all-inclusive, meaning that all of the following are included in the hourly service price: +In the **Billing** section of `Aiven Console `_, you can manage your :doc:`billing groups `, :doc:`payment cards `, and view and download your invoices. + + +Service charges +---------------- + +The prices shown in the Aiven console are all-inclusive, meaning that all of the following are included in the hourly service price: * Virtual machine costs * Network costs @@ -11,10 +17,10 @@ The prices as shown in the Aiven console are all-inclusive, meaning that all of .. note:: While network traffic is not charged separately, your application cloud service provider may charge you for the network traffic going to or from their services. - Use of PrivateLink and/or additional storage will incur additional costs on top of the hourly service usage rate. + Use of PrivateLink and additional storage will incur additional costs on top of the hourly service usage rate. -The minimum hourly charge unit is one hour. For example, when you launch an Aiven service and terminate it after 40 minutes, you will be charged for one full hour. Likewise, if you terminate a service after 40.5 hours, you will be charged for 41 hours. +The minimum hourly charge unit is one hour. For example, when you launch an Aiven service and terminate it after 40 minutes, you will be charged for one full hour. Likewise, if you terminate a service after 40.5 hours, you will be charged for 41 hours. :doc:`Terminating or pausing a service <../howto/pause-from-cli>` will stop the accumulation of new charges immediately. However, please note that the minimum hourly charge unit still applies prior to terminating or pausing a service. -Upgrading or changing to different service plan levels (e.g., from **Startup-4** to **Business-8**) will not incur any additional cost. Additionally, migrating a service to another cloud region or to a different cloud provider does not incur any additional costs. \ No newline at end of file +Migrating a service to another cloud region or to a different cloud provider does not incur any additional costs. \ No newline at end of file diff --git a/docs/platform/concepts/maintenance-window.rst b/docs/platform/concepts/maintenance-window.rst index 4d2688d686..30f58cb636 100644 --- a/docs/platform/concepts/maintenance-window.rst +++ b/docs/platform/concepts/maintenance-window.rst @@ -22,4 +22,4 @@ In case of **Apache Kafka®** and **OpenSearch®** the service DNS address resol Maintenance updates ~~~~~~~~~~~~~~~~~~~ -Security updates, platform updates that affect reliability or stability of the service nodes, and quarterly patch updates are always mandatory. Other updates are initially optional. Advance notice is given for all updates. 
After optional updates have been available for six months, they become mandatory and are applied on the next week's maintenance window at the earliest. This means you have at least 7 days advance notice with exception of critical security updates. These critical updates are applied in the maintenance window of the current week. \ No newline at end of file +Security updates, platform updates that affect reliability or stability of the service nodes, and quarterly patch updates are always mandatory. Other updates are initially optional. Advance notice is given for all updates. After optional updates have been available for six months, they become mandatory and are applied on the next week's maintenance window at the earliest. This means you have at least 7 days advance notice with exception of critical security updates. These critical updates are applied in the maintenance window of the current week. During service upgrades, maintenance updates are automatically applied and do not require any action from you. \ No newline at end of file diff --git a/docs/platform/concepts/service-power-cycle.rst b/docs/platform/concepts/service-power-cycle.rst index c538491f3c..67bebb5d94 100644 --- a/docs/platform/concepts/service-power-cycle.rst +++ b/docs/platform/concepts/service-power-cycle.rst @@ -52,3 +52,5 @@ When a service is powered on, the following things will happen: Depending on the service plan, backups have different retention periods. Data will be lost after the retention period. +.. Note:: + Maintenance updates are automatically applied when a service is powered on as new virtual machines are created for the service to run on. \ No newline at end of file diff --git a/docs/platform/concepts/service_backups.rst b/docs/platform/concepts/service_backups.rst index e9a60e3707..5721806a8a 100644 --- a/docs/platform/concepts/service_backups.rst +++ b/docs/platform/concepts/service_backups.rst @@ -6,7 +6,12 @@ This article provides information on general rules for handling service backups About backups at Aiven ---------------------- -All Aiven services, except for Apache Kafka® and M3 Aggregator/Coordinator, have time-based backups that are encrypted and securely stored. The backup retention times vary based on the service and the selected service plan. +All Aiven services, except for Apache Kafka® and M3 Aggregator/Coordinator, have time-based backups that are encrypted and securely stored. Backups at Aiven are stored in the object storage of the cloud region where a service runs (for example, S3 for AWS or GCS for GCP). You can check the location of your service's backups in `Aiven Console `_ > your service's homepage > **Backups** tab. + +.. image:: /images/platform/concepts/backup_location_preview.png + :alt: Backup location preview on console + +The backup retention times vary based on the service and the selected service plan. Aiven takes service backups for managing purposes. These backups are compressed and encrypted by the Aiven management platform and, as such, are not available for download for any service type. diff --git a/docs/platform/howto/billing-aws-marketplace-subscription.rst b/docs/platform/howto/billing-aws-marketplace-subscription.rst new file mode 100644 index 0000000000..5cd7478ee2 --- /dev/null +++ b/docs/platform/howto/billing-aws-marketplace-subscription.rst @@ -0,0 +1,33 @@ +Set up AWS Marketplace for Aiven services +=========================================== + +Aiven makes its services available through the Amazon AWS Marketplace. 
This article shows the steps needed to create a subscription that links the accounts. + +First, there are some steps that need to be completed on the AWS Marketplace page before heading over to the Aiven Console and finishing the process. + +AWS Marketplace setup +--------------------- + +1. Search for "Aiven Managed Database Services" on the `AWS Marketplace `_. This page contains information about all of Aiven's services and how the marketplace subscription works. Click the **View purchase options** button on this page. + +.. image:: /images/platform/howto/aws-marketplace-listing.png + :alt: AWS Marketplace purchase options button for Aiven Managed Database Services + +2. When you are ready, click the **Subscribe** button on the page. You will NOT be charged by clicking this button; this only sets up a billing subscription between AWS and Aiven. You will only be charged after deploying Aiven services. + +3. Click **Set up your account**. This takes you to the Aiven Console to complete the process. + +Aiven account setup +------------------- + +4. You should now be on the AWS signup page at Aiven, asking you to sign up or log in. + +5. After registering or logging in, choose or create an Aiven organization to use the AWS subscription for. If you have any existing Aiven projects that you want to be moved to this AWS subscription, this organization name is the one you will need for that. + +If you have any issues linking Aiven to your AWS subscription, you can try the process again in the AWS web console by finding the Aiven subscription and clicking **Set up your account**. + + +.. note:: + The URL that you log in to for your AWS subscription is https://console.aws.aiven.io. This is different from the Aiven Console (https://console.aiven.io). + + diff --git a/docs/platform/howto/billing-azure-marketplace-subscription.rst b/docs/platform/howto/billing-azure-marketplace-subscription.rst index b57baae037..ad975be5f9 100644 --- a/docs/platform/howto/billing-azure-marketplace-subscription.rst +++ b/docs/platform/howto/billing-azure-marketplace-subscription.rst @@ -3,41 +3,38 @@ Set up Azure Marketplace for Aiven services Aiven makes its services available through the Microsoft Azure Marketplace. This article shows the steps needed to create a subscription that links the accounts. -First, there are some steps that need to be completed on the Azure Marketplace page before heading over to the Aiven console and finishing the process. +First, there are some steps that need to be completed on the Azure Marketplace page before heading over to the Aiven Console and finishing the process. Azure Marketplace setup ----------------------- -1. Search for "Aiven Managed Database Services" on the `Azure Marketplace `_. This page contains information about all of Aiven's services and how the marketplace subscription works. Click the **Subscribe** button on this page. +1. Search for "Aiven Managed Database Services" on the `Azure Marketplace `_. This page contains information about all of Aiven's services and how the marketplace subscription works. Click the **Subscribe** button on this page. .. image:: /images/platform/howto/azure-marketplace-listing.png :alt: Azure Marketplace listing tile for Aiven Managed Database Services :height: 342px -2. Select your desired Azure subscription resource group to organise your resources, give the subscription a name, and make sure that "Recurring billing" is turned on. 
There is only one plan available because all of the costs are managed by Aiven based on what you use during the month. +2. Select your desired Azure subscription resource group to organise your resources, give the subscription a name, and make sure that "Recurring billing" is turned on. There is only one plan available because all of the costs are managed by Aiven based on what you use during the month. 3. Progress to the "Review + subscribe" screen, then read and agree to the terms of use. -4. When you are ready, click the **Subscribe** button at the bottom of the page. You will NOT be charged by clicking this button; this only sets up a billing subscription between Azure and Aiven. You will only be charged after deploying Aiven services. +4. When you are ready, click the **Subscribe** button at the bottom of the page. You will NOT be charged by clicking this button; this only sets up a billing subscription between Azure and Aiven. You will only be charged after deploying Aiven services. -5. You should now see a message that says "Your SaaS subscription is in progress". This takes a few minutes to complete before you can progress. +5. You should now see a message that says "Your SaaS subscription is in progress". This takes a few minutes to complete before you can progress. 6. When you see the message "Thank you for your order. Configure the SaaS service to complete the purchase", click the "Configure account now" button to head over to the Aiven website to complete the process. Aiven account setup ------------------- -7. You should now be on the `Azure signup page at Aiven `_, asking you for your email address to log in to the account. This should be the same email as you use on the Azure console. +7. You should now be on the `Azure signup page at Aiven `_, asking you for your email address to log in to the account. This should be the same email as you use on the Azure console. -8. After entering your email address, you will be authenticated via Azure single sign-on and then returned to the Aiven console. +8. After entering your email address, you will be authenticated via Azure single sign-on and then returned to the Aiven Console. 9. You will be sent an email to "Activate your new subscription" - click on the **Activate now** link to join your Aiven account to your Azure account. -10. You are now ready to create your first project and deploy services. - .. note:: - Note the URL is https://console.azure.aiven.io - this uses a different account system than https://console.aiven.io. When coming back to Aiven in the future, you will need to use https://console.azure.aiven.io to login, and authenticate using Azure OAuth. + The URL is https://console.azure.aiven.io - this is different from the Aiven Console (https://console.aiven.io). -.. note:: - When you view the Aiven subscription on the Azure SaaS resource list, you will see a link to **Open SaaS Account on publisher's site**. You can use this link to complete the subscription process if anything goes wrong during the steps listed here. +When you view the Aiven subscription on the Azure SaaS resource list, you will see a link to **Open SaaS Account on publisher's site**. You can use this link to complete the subscription process if anything goes wrong during the steps listed here. 
diff --git a/docs/platform/howto/change-billing-contact.rst b/docs/platform/howto/change-billing-contact.rst index ec772e4771..282c8d6062 100644 --- a/docs/platform/howto/change-billing-contact.rst +++ b/docs/platform/howto/change-billing-contact.rst @@ -1,24 +1,4 @@ -Billing contact -====================== - -For projects using credit card billing, the billing contact is automatically set to the user who added the payment card and assigned it to the project(s). The billing contact is the user who owns the project's credit card. -You cannot remove the billing contact from the project(s). However, you change the billing contact user. - -Change billing contact -~~~~~~~~~~~~~~~~~~~~~~ -To change the billing contact user, you need to add the payment card to the user's profile you want to assign as the billing contact. - -1. In the `Aiven web console `_, click the **User information** icon on the top right of the screen to view the User profile. - -.. image:: /images/platform/billing/billing_user_information.png - :alt: Select the user information icon to view billing contact - -2. Next, click the **Payment Options** tab. -3. Enter the new credit card details and click **Save card**. -4. To the payment card added, assign the project(s) from the previous card and click **Assign**. - -.. image:: /images/platform/billing/billing_assign_card.png - :alt: Assign payment card to projects - -Once the project(s) are assigned, the new billing contact is set, and you can delete the previous billing contact user. +Change billing contacts +======================== +To change the billing contact to a different email address, :doc:`update your billing information ` in the billing group. \ No newline at end of file diff --git a/docs/platform/howto/edit-user-profile.rst b/docs/platform/howto/edit-user-profile.rst new file mode 100644 index 0000000000..c20a981165 --- /dev/null +++ b/docs/platform/howto/edit-user-profile.rst @@ -0,0 +1,12 @@ +Edit your user profile +======================== + +To edit your name, job title, location, or other personal details in your profile: + +#. In the `Aiven Console `_ click the **User information** icon in the top right. + +#. Select **User profile**. + +#. Click **Edit profile**. + +#. Make the changes and click **Save changes**. diff --git a/docs/platform/howto/feature-preview.rst b/docs/platform/howto/feature-preview.rst new file mode 100644 index 0000000000..d628558569 --- /dev/null +++ b/docs/platform/howto/feature-preview.rst @@ -0,0 +1,15 @@ +Feature previews +================= + +Before an official release, some features are available to our customers for testing. These feature previews let you try out upcoming enhancements and give our product teams feedback to help improve them. + +Enable a feature preview +------------------------- + +To try upcoming features before they are released: + +#. Click the **User information** icon in the top right and select **Feature preview**. + +#. On the **Feature preview** tab, click **Enable** for any of the features you want to test. + +After enabling a feature preview and testing it, you can provide feedback from this page by clicking **Give feedback**. 
\ No newline at end of file diff --git a/docs/platform/howto/list-user.rst b/docs/platform/howto/list-user.rst index 3dbcf8f799..8426523c29 100644 --- a/docs/platform/howto/list-user.rst +++ b/docs/platform/howto/list-user.rst @@ -1,6 +1,6 @@ User and authentication management =================================== -Browse through instructions for common Aiven platform tasks related to managing users and authentication settings. +Browse through instructions for common Aiven platform tasks related to managing users, profiles, and authentication settings. .. tableofcontents:: diff --git a/docs/platform/howto/manage-org-users.rst b/docs/platform/howto/manage-org-users.rst new file mode 100644 index 0000000000..88805484a2 --- /dev/null +++ b/docs/platform/howto/manage-org-users.rst @@ -0,0 +1,59 @@ +Manage users in an organization +================================ + +.. important:: + Organization users are available as a feature preview and must be :doc:`enabled in the user profile `. + +Adding users to your organization lets you give them access to specific organizational units, projects, and services within that organization. + +Invite users to an organization +--------------------------------- + +To add users to your organization, send them an invite: + +#. Click **Admin**. + +#. Click **Users**. + +#. Click **Invite users**. + +#. Enter the email addresses of the people you want to invite. + +#. Click **Invite users**. + +The users receive an email with instructions to sign up (for new users) and accept the invite. + + +Remove users from an organization +---------------------------------- + +If you remove a user from an organization, they will also be removed from all teams and projects and no longer have access to any resources in the organization. + +To remove a user from an organization: + +#. Click **Admin**. + +#. Click **Users**. + +#. Find the user that you want to remove and click the **Actions** menu. + +#. Select **Remove**. + +#. Confirm you want to remove the user by clicking **Remove user**. + + +Resend an invite +----------------- + +If you need to resend an invite to a user: + +#. Click **Admin**. + +#. Click **Users**. + +#. Find the email address that you want to resend an invite to and click the **Actions** menu. + +#. Select **Resend invite**. + +They get a new email with instructions for signing up or accepting the invite. + diff --git a/docs/platform/howto/manage-payment-card.rst b/docs/platform/howto/manage-payment-card.rst index bbba0b7fe5..ecd46dd0a6 100644 --- a/docs/platform/howto/manage-payment-card.rst +++ b/docs/platform/howto/manage-payment-card.rst @@ -1,14 +1,28 @@ -Manage payment card +Manage payment cards ====================== -For a project using credit card billing, a payment card is the credit card to which all project billing is invoiced. The payment card, also known as the project's credit, is owned by the billing contact. -The billing contact can add or update the payment card and assign project(s) to this payment card. +You can add a payment card to your organization and use it as the payment method for one or more billing groups. -Add or update payment card -~~~~~~~~~~~~~~~~~~~~~~~~~~ -The billing contact can add or update the payment card and assign project(s) to this payment card. -1. In the `Aiven web console `_, click the **User information** icon on the top right of the screen to view the User profile. -2. Next, click the **Payment Options** tab. -3.
You can add a new payment card or update the existing payment card, in the **Payment Options** tab. -4. If you have added a new payment card, ensure to assign this card to project(s). +Add a payment card +~~~~~~~~~~~~~~~~~~~~ +To add a payment card in the `Aiven Console `_: + +#. Click **Billing**. +#. Click **Payment methods**. +#. On the **Payment cards** tab in the **Add payment card** section, enter the credit card details. +#. Click **Add card**. + +The new card is displayed in the **Available payment cards** section and can be used in your billing groups. + +Delete a payment card +~~~~~~~~~~~~~~~~~~~~~~ + +You can't delete a payment card that is assigned to a billing group. If you want to delete a payment card, :doc:`remove it from all billing groups ` first. + +To delete a payment card in the `Aiven Console `_: + +#. Click **Billing**. +#. Click **Payment methods**. +#. On the **Payment cards** tab, find the card you want to delete. +#. Click **Delete** and confirm you want to delete the card. \ No newline at end of file diff --git a/docs/platform/howto/manage-vpc-peering.rst b/docs/platform/howto/manage-vpc-peering.rst index 02cd8480fa..0998687928 100644 --- a/docs/platform/howto/manage-vpc-peering.rst +++ b/docs/platform/howto/manage-vpc-peering.rst @@ -75,4 +75,17 @@ You can enable public internet access for your services by following the :doc:`E IP filtering (the Allowed IP Addresses list on the service overview page) is still available for a service deployed to a VPC where both public and private access are allowed. We recommend that you use IP filtering when your VPC service is also exposed to the public internet. -Also note that safelisting applies to both internal and external traffic. If you safelist an external IP address and want to keep traffic flowing with the internal (peered) connections, make sure that you safelist the CIDR blocks of the peered networks as well to avoid disruptions to the service. \ No newline at end of file +Also note that safelisting applies to both internal and external traffic. If you safelist an external IP address and want to keep traffic flowing with the internal (peered) connections, make sure that you safelist the CIDR blocks of the peered networks as well to avoid disruptions to the service. + +Troubleshoot VPC connection issues +------------------------------------- + +Any network changes to VPC peered hosts external from Aiven can cause issues with routing to your Aiven services hosted in a VPC. To troubleshoot such issues, take the following steps: + +1. In `Aiven Console `_, select **VPC**. +2. Find the ID of the affected VPC and select it from the **Internal ID** column. +3. Select **Refresh VPC connections**. + +As a result, the platform checks the VPC peering connection and rebuilds the peering connection state if there are any changes detected. + +For any other issues, open a support ticket from Aiven Console to get in touch with the support team and/or see :doc:`Get support in the Aiven Console `. \ No newline at end of file diff --git a/docs/platform/howto/move-to-aws-marketplace-billing.rst b/docs/platform/howto/move-to-aws-marketplace-billing.rst new file mode 100644 index 0000000000..78ff1fcd5a --- /dev/null +++ b/docs/platform/howto/move-to-aws-marketplace-billing.rst @@ -0,0 +1,28 @@ +Move from Aiven direct billing to AWS Marketplace +================================================= + +Aiven makes its services available through the AWS Marketplace. 
If you already have some services running in a project which is billed directly through Aiven but you would like to move to an AWS Marketplace subscription without disrupting your services, this article shows the steps needed to gather the relevant information and submit the request. + +Set up an AWS Marketplace subscription for Aiven +---------------------------------------------------------- + +Follow the steps to :doc:`set up AWS Marketplace for Aiven services `. This will create a new Aiven organization, which is where your projects will be moved to. + +Gather the required information +------------------------------- + +Aiven will need some information from both your existing user account and your new subscription in order to perform the migration. + +**From your existing Aiven user account:** + +* The name of the Aiven projects that have the services you wish to move. + +**From your new Aiven organization with the AWS marketplace subscription:** + +* The name of the new organization. + +Send the request to Aiven +------------------------- + +Once you have collected the information above, send it by email to `sales@Aiven.io `_ and someone will be in touch to complete the process. + diff --git a/docs/platform/howto/move-to-azure-marketplace-billing.rst b/docs/platform/howto/move-to-azure-marketplace-billing.rst new file mode 100644 index 0000000000..dc9a60865e --- /dev/null +++ b/docs/platform/howto/move-to-azure-marketplace-billing.rst @@ -0,0 +1,28 @@ +Move from Aiven direct billing to Azure Marketplace +=================================================== + +Aiven makes its services available through the Azure Marketplace. If you already have some services running in a project which is billed directly through Aiven but you would like to move to an Azure Marketplace subscription without disrupting your services, this article shows the steps needed to gather the relevant information and submit the request. + +Set up an Azure Marketplace subscription for Aiven +-------------------------------------------------- + +Follow the steps to :doc:`set up Azure Marketplace for Aiven services `. This will create a new Aiven organization, which is where your projects will be moved to. + +Gather the required information +------------------------------- + +Aiven will need some information from both your existing user account and your new subscription in order to perform the migration. + +**From your existing Aiven user account:** + +* The name of the Aiven projects that have the services you wish to move. + +**From your new Aiven organization with the Azure marketplace subscription:** + +* The name of the new organization. + +Send the request to Aiven +------------------------- + +Once you have collected the information above, send it by email to `sales@Aiven.io `_ and someone will be in touch to complete the process. + diff --git a/docs/platform/howto/move-to-gcp-marketplace-billing.rst b/docs/platform/howto/move-to-gcp-marketplace-billing.rst index fdbb0268a7..345ab51ab5 100644 --- a/docs/platform/howto/move-to-gcp-marketplace-billing.rst +++ b/docs/platform/howto/move-to-gcp-marketplace-billing.rst @@ -19,7 +19,7 @@ Aiven will need some information from both your existing and new subscriptions i **From your new Aiven organization with the GCP marketplace subscription:** -* Your new Aiven organization name, as shown at the top of the `Aiven GCP console `_. +* Your new Aiven organization name, as shown in the `Aiven GCP console `_. 
Send the request to Aiven ------------------------- diff --git a/docs/platform/howto/service-metrics.rst b/docs/platform/howto/service-metrics.rst new file mode 100644 index 0000000000..21cb511b07 --- /dev/null +++ b/docs/platform/howto/service-metrics.rst @@ -0,0 +1,39 @@ +Access service metrics +======================= + +Service metrics are essential for monitoring and evaluating how well your services perform. They provide valuable information about how your services utilize resources, their efficiency, and their overall health. By tracking and analyzing service metrics, you can make data-driven decisions, identify potential issues, and optimize the performance of your services. + +Available service metrics +-------------------------- + +The service metrics available in the Aiven Console include: + +* **CPU usage:** Shows the percentage of CPU resources consumed by the service. +* **Disk space usage:** Represents the percentage of disk space utilized by the service. +* **Disk iops (reads):** Indicates the input/output operations per second (IOPS) for disk reads. +* **Disk iops (writes):** Indicates the input/output operations per second (IOPS) for disk writes. +* **Load average:** Shows the 5-minute average CPU load, indicating the system's computational load. +* **Memory usage:** Represents the percentage of memory resources utilized by the service. +* **Network received:** Indicates the amount of network traffic received by the service, measured in bytes per second. +* **Network transmitted:** Indicates the amount of network traffic transmitted by the service, also measured in bytes per second. + +View service metrics +--------------------- + +The `Aiven Console `_ provides a user-friendly interface for accessing service metrics. Follow these steps to retrieve the metrics: + +1. Go to your **Services**, and open the service you want to review. +2. Navigate to the **Metrics** tab. +3. Choose the desired period for which you want to retrieve the metrics. The available options are: + + * **Hour:** Last hour metrics, updated every 30 seconds. + * **Day:** Last day metrics, updated every 5 minutes. + * **Week:** Last week metrics, updated every 30 minutes. + * **Month:** Last month metrics, updated every 3 hours. + * **Year:** Last year metrics, updated daily. + +.. note:: + The selected period is relative to the current date and time. For instance, when you select the **Hour** option, it retrieves metrics for the last hour. + + +To further enhance your :doc:`monitoring capabilities `, you can **Enable metrics integration** and establish a connection to push service metrics to an M3, InfluxDB®, or PostgreSQL® service within the Aiven platform. This integration allows you to conveniently send your service metrics to an existing service or create a new one dedicated to receiving and storing the metrics. diff --git a/docs/platform/howto/update-tax-status.rst b/docs/platform/howto/update-tax-status.rst index 2a00ae0ecc..953db274de 100644 --- a/docs/platform/howto/update-tax-status.rst +++ b/docs/platform/howto/update-tax-status.rst @@ -3,15 +3,12 @@ Update your tax status You can add your VAT ID in the billing information in the Aiven Console: -1. Click **Billing** and select the billing group that you want to update. -2. Click **Billing information**. -3. In the **Billing Details** section, click **Edit**. -4. Enter your VAT ID and click **Save**. - -.. 
important:: - - Please be aware that you can set up one billing profile per project or create a Billing Group to group costs and consolidate invoices by different business units. - For further information please refer to :doc:`Billing Groups `. - -If needed, you can change the billing address by clicking **Edit** in the **Company Details** section. Once the billing country has been updated, it will be used in all future invoices. +#. Click **Billing**. +#. Click **Billing groups**. +#. Select the billing group that you want to update. +#. Click **Billing information**. +#. In the **Billing details** section, click **Edit**. +#. Enter your **VAT ID** and click **Save**. + +You can also change the billing address by clicking **Edit** in the **Company details** section. Once the billing country has been updated, it will be used in all future invoices. Billing estimates are updated approximately once an hour, so it may take up to an hour for the new tax status to become visible on the invoice estimates. diff --git a/docs/platform/howto/use-billing-groups.rst b/docs/platform/howto/use-billing-groups.rst index 843671d315..9b12952bce 100644 --- a/docs/platform/howto/use-billing-groups.rst +++ b/docs/platform/howto/use-billing-groups.rst @@ -12,15 +12,6 @@ Rename billing groups #. Enter the new name and click **Rename**. -Download invoices -"""""""""""""""""" - -#. Select the name of the billing group. - -#. On the **Invoices** tab, find the billing period that you want to download an invoice for. - -#. Click the three dots in the **Actions** column and select **Download PDF** or **Download CSV**. - Update your billing information """""""""""""""""""""""""""""""" diff --git a/docs/platform/howto/use-google-private-service-connect.rst b/docs/platform/howto/use-google-private-service-connect.rst index 246691dae3..92da446349 100644 --- a/docs/platform/howto/use-google-private-service-connect.rst +++ b/docs/platform/howto/use-google-private-service-connect.rst @@ -111,7 +111,7 @@ To approve the connection, run the following approval command: .. code:: shell - avn privatelink google connection approve MY_SERVICE_NAME --privatelink-connection-id PRIVATELINK_CONNECTION_ID --user-ip-address PSC_ENDPOINT_IP_ADDRESS + avn service privatelink google connection approve MY_SERVICE_NAME --privatelink-connection-id PRIVATELINK_CONNECTION_ID --user-ip-address PSC_ENDPOINT_IP_ADDRESS As a result, the connection initially transitions to the user-approved state. diff --git a/docs/platform/howto/vnet-peering-azure.rst b/docs/platform/howto/vnet-peering-azure.rst index 53d420475f..ab5fcaf7e7 100644 --- a/docs/platform/howto/vnet-peering-azure.rst +++ b/docs/platform/howto/vnet-peering-azure.rst @@ -108,7 +108,7 @@ as ``$user_app_secret`` below ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This can be found in the Azure portal in "Virtual networks" -> name of -your network -> "Properties" -> "Resource ID", or using +your network -> "JSON View" -> "Resource ID", or using :: @@ -129,7 +129,8 @@ Also grab the output. Save this for later as ``$user_vnet_name`` ``$user_vnet_id`` should have the format -``/subscriptions/$user_subscription_id/resourceGroups/$user_resource_group/providers/Microsoft.Network/virtualNetworks/$user_vnet_name`` +``/subscriptions/$user_subscription_id/ +resourceGroups/$user_resource_group/providers/Microsoft.Network/virtualNetworks/$user_vnet_name`` 6. 
grant your service principal permissions to peer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/platform/reference/eol-for-major-versions.rst b/docs/platform/reference/eol-for-major-versions.rst index 3aef2e4352..ee828289bc 100644 --- a/docs/platform/reference/eol-for-major-versions.rst +++ b/docs/platform/reference/eol-for-major-versions.rst @@ -56,15 +56,17 @@ Aiven for OpenSearch® is the open source continuation of the original Elasticse .. container:: intercom-interblocks-table-container - +-------------+---------------+------------------+------------------+ - | **Version** | **Aiven EOL** | **Availability | **Upstream EOL** | - | | | end on Aiven | | - | | | Platform** | | - +-------------+---------------+------------------+------------------+ - | 1.x | 2023-12-31 | 2023-09-30 | 2023-12-31 | - +-------------+---------------+------------------+------------------+ - | 2.x | 20??-??-?? | 20??-??-?? | 20??-??-?? | - +-------------+---------------+------------------+------------------+ + +-------------+------------------------+------------------+------------------+ + | **Version** | **Aiven EOL** | **Availability | **Upstream EOL** | + | | | end on Aiven | | + | | | Platform** | | + +-------------+------------------------+------------------+------------------+ + | 1.x | 2023-12-31 | 2023-09-30 | 2023-12-31 | + +-------------+------------------------+------------------+------------------+ + | 2.x | `TBA*` | `TBA` | `TBA` | + +-------------+------------------------+------------------+------------------+ + + `*` To be announced Aiven for PostgreSQL® diff --git a/docs/products/clickhouse/howto.rst b/docs/products/clickhouse/howto.rst index ab15e55670..77d10ebae5 100644 --- a/docs/products/clickhouse/howto.rst +++ b/docs/products/clickhouse/howto.rst @@ -39,3 +39,5 @@ Aiven for ClickHouse® how-tos - :doc:`Connect Apache Kafka® to Aiven for ClickHouse® ` - :doc:`Connect PostgreSQL® to Aiven for ClickHouse® ` - :doc:`Connect to external database via JDBC ` + - :doc:`Manage Aiven for ClickHouse® data service integrations ` + - :doc:`Manage Aiven for ClickHouse® integration databases ` diff --git a/docs/products/clickhouse/howto/integrate-kafka.rst b/docs/products/clickhouse/howto/integrate-kafka.rst index aba8214490..3f6ae6eb36 100644 --- a/docs/products/clickhouse/howto/integrate-kafka.rst +++ b/docs/products/clickhouse/howto/integrate-kafka.rst @@ -55,8 +55,14 @@ The newly created database name has the following format: `service_KAFKA_SERVICE Update Apache Kafka integration settings ----------------------------------------- -Next step is to configure the topic and data format options for the integration. This will create a virtual table in Aiven for ClickHouse that can receive and send messages from multiple topics. You can have as many of such tables as you need. You need to define for each table following: +Next step is to configure the topic and data format options for the integration. This will create a virtual table in Aiven for ClickHouse that can receive and send messages from multiple topics. You can have as many of such tables as you need. +For each table, there are mandatory and optional setting to be defined. + +Mandatory settings +'''''''''''''''''' + +For each table, you need to define the following: * ``name`` - name of the connector table * ``columns`` - array of columns, with names and types @@ -64,24 +70,108 @@ Next step is to configure the topic and data format options for the integration. 
* ``data_format`` - your preferred format for data input, see :doc:`../reference/supported-input-output-formats` * ``group_name`` - consumer group name, that will be created on your behalf -Integration settings in a JSON format: - -.. code:: json - - { - "tables": [ - { - "name": "CONNECTOR_TABLE_NAME", - "columns": [ - {"name": "id", "type": "UInt64"}, - {"name": "name", "type": "String"} - ], - "topics": [{"name": "topic1"}, {"name": "topic2"}], - "data_format": "DATA_FORMAT", - "group_name": "CONSUMER_NAME" - } - ] - } +.. topic:: JSON format + + .. code-block:: json + + { + "tables": [ + { + "name": "CONNECTOR_TABLE_NAME", + "columns": [ + {"name": "id", "type": "UInt64"}, + {"name": "name", "type": "String"} + ], + "topics": [{"name": "topic1"}, {"name": "topic2"}], + "data_format": "DATA_FORMAT", + "group_name": "CONSUMER_NAME" + } + ] + } + +Optional settings +''''''''''''''''' + +For each table, you can define the following optional settings: + +.. list-table:: + :widths: 10 30 5 5 5 5 + :header-rows: 1 + + * - Name + - Description + - Default value + - Allowed values + - Minimum value + - Maximum value + * - ``auto_offset_reset`` + - Action to take when there is no initial offset in the offset store or the desired offset is out of range + - ``earliest`` + - ``smallest``, ``earliest``, ``beginning``, ``largest``, ``latest``, ``end`` + - -- + - -- + * - ``date_time_input_format`` + - Method to read ``DateTime`` from text input formats + - ``basic`` + - ``basic``, ``best_effort``, ``best_effort_us`` + - -- + - -- + * - ``handle_error_mode`` + - Method to handle errors for the Kafka engine + - ``default`` + - ``default``, ``stream`` + - -- + - -- + * - ``max_block_size`` + - Number of rows collected by poll(s) for flushing data from Kafka + - ``0`` + - ``0`` - ``1_000_000_000`` + - ``0`` + - ``1_000_000_000`` + * - ``max_rows_per_message`` + - Maximum number of rows produced in one Kafka message for row-based formats + - ``1`` + - ``1`` - ``1_000_000_000`` + - ``1`` + - ``1_000_000_000`` + * - ``num_consumers`` + - Number of consumers per table per replica + - ``1`` + - ``1`` - ``10`` + - ``1`` + - ``10`` + * - ``poll_max_batch_size`` + - Maximum amount of messages to be polled in a single Kafka poll + - ``0`` + - ``0`` - ``1_000_000_000`` + - ``0`` + - ``1_000_000_000`` + * - ``skip_broken_messages`` + - Minimum number of broken messages from Kafka topic per block to be skipped + - ``0`` + - ``0`` - ``1_000_000_000`` + - ``0`` + - ``1_000_000_000`` + +.. topic:: JSON format + + .. code-block:: json + + { + "tables": [ + { + "name": "CONNECTOR_TABLE_NAME", + "columns": [ + {"name": "id", "type": "UInt64"}, + {"name": "name", "type": "String"} + ], + "topics": [{"name": "topic1"}, {"name": "topic2"}], + "data_format": "DATA_FORMAT", + "group_name": "CONSUMER_NAME", + "auto_offset_reset": "earliest" + } + ] + } Configure integration with CLI -------------------------------- @@ -174,21 +264,6 @@ You can also bring the entries from ClickHouse table into the Apache Kafka topic .. 
_reference: Reference ----------- - -When connecting ClickHouse® to Kafka® using Aiven integrations, data exchange is possible with the following formats only: - -============================ ==================================================================================== -Format Example -============================ ==================================================================================== -CSV ``123,"Hello"`` -JSONASString ``{"x":123,"y":"hello"}`` -JSONCompactEachRow ``[123,"Hello"]`` -JSONCompactStringsEachRow ``["123","Hello"]`` -JSONEachRow ``{"x":123,"y":"hello"}`` -JSONStringsEachRow ``{"x":"123","y":"hello"}`` -MsgPack ``{\xc4\x05hello`` -TSKV ``x=123\ty=hello`` -TSV ``123\thello`` -TabSeparated ``123\thello`` -============================ ==================================================================================== +--------- + +When connecting ClickHouse® to Kafka® using Aiven integrations, data exchange requires using specific formats. Check the supported formats for input and output data in :doc:`Formats for ClickHouse®-Kafka® data exchange `. diff --git a/docs/products/clickhouse/howto/list-integrations.rst b/docs/products/clickhouse/howto/list-integrations.rst index d88f656715..bb48d8c820 100644 --- a/docs/products/clickhouse/howto/list-integrations.rst +++ b/docs/products/clickhouse/howto/list-integrations.rst @@ -20,3 +20,11 @@ This section provides instructions on how to integrate your Aiven for ClickHouse .. grid-item-card:: :doc:`Connect to external DBs with JDBC ` :shadow: md :margin: 2 2 0 0 + + .. grid-item-card:: :doc:`Manage Aiven for ClickHouse® data service integrations ` + :shadow: md + :margin: 2 2 0 0 + + .. grid-item-card:: :doc:`Manage Aiven for ClickHouse® integration databases ` + :shadow: md + :margin: 2 2 0 0 diff --git a/docs/products/clickhouse/reference/supported-input-output-formats.rst b/docs/products/clickhouse/reference/supported-input-output-formats.rst index f1ff59ebde..a1d4d563d0 100644 --- a/docs/products/clickhouse/reference/supported-input-output-formats.rst +++ b/docs/products/clickhouse/reference/supported-input-output-formats.rst @@ -6,9 +6,12 @@ When connecting ClickHouse® to Kafka® using Aiven integrations, data exchange ============================ ==================================================================================== Format name Notes ============================ ==================================================================================== -Avro Only supports binary Avro format with embedded schema. +Avro Binary Avro format with embedded schema. Libraries and documentation: https://avro.apache.org/ +AvroConfluent Binary Avro with schema registry. + + Requires the Karapace Schema Registry to be enabled in the Kafka service. CSV Example: ``123,"Hello"`` JSONASString Example: ``{"x":123,"y":"hello"}`` JSONCompactEachRow Example: ``[123,"Hello"]`` diff --git a/docs/products/flink/howto/pg-cdc-connector.rst b/docs/products/flink/howto/pg-cdc-connector.rst index 7f02a04122..1fc553ce00 100644 --- a/docs/products/flink/howto/pg-cdc-connector.rst +++ b/docs/products/flink/howto/pg-cdc-connector.rst @@ -99,7 +99,7 @@ If you encounter the ``must be superuser to create FOR ALL TABLES publication`` .. 
code:: SELECT * FROM aiven_extras.pg_create_publication_for_all_tables( - 'my_test_publication', + 'dbz_publication', 'INSERT,UPDATE,DELETE' ); diff --git a/docs/products/grafana/concepts/grafana-features.rst b/docs/products/grafana/concepts/grafana-features.rst index b5e1737cd3..7cd22429e2 100644 --- a/docs/products/grafana/concepts/grafana-features.rst +++ b/docs/products/grafana/concepts/grafana-features.rst @@ -7,7 +7,7 @@ Key features of Aiven for Grafana® include: Quick and flexible deployment options ------------------------------------- -With Aiven for Grafana, you can enjoy a quick and flexible deployment process, ensuring production-ready Grafana clusters are available in 10 minutes. You have the flexibility to choose your preferred public cloud platform for deployment from over 100 regions supported. The deployment process also includes high-performance nodes to enhance performance. Aiven supports the Bring-Your-Own-Account (BYOA) deployment model, enabling you to meet strict control requirements. +With Aiven for Grafana, you can enjoy a quick and flexible deployment process, ensuring production-ready Grafana clusters are available in 10 minutes. You have the flexibility to choose your preferred public cloud platform for deployment from over 100 regions supported. The deployment process also includes high-performance nodes to enhance performance. Aiven supports the Bring-Your-Own-Cloud (BYOC) deployment model, enabling you to meet strict control requirements. Integrate with existing Aiven tools and data infrastructure -------------------------------------------------------------- diff --git a/docs/products/kafka/concepts/kafka-quotas.rst b/docs/products/kafka/concepts/kafka-quotas.rst new file mode 100644 index 0000000000..d4c605e0f2 --- /dev/null +++ b/docs/products/kafka/concepts/kafka-quotas.rst @@ -0,0 +1,45 @@ +Quotas in Aiven for Apache Kafka® +==================================== + +Quotas ensure fair resource allocation, stability, and efficiency in your Kafka cluster. In Aiven for Apache Kafka®, you can :doc:`add quotas <../howto/manage-quotas>` to limit the data or requests exchanged by producers and consumers within a specific period, preventing issues like broker overload, network congestion, and service disruptions caused by excessive or malicious traffic. You can effectively manage resource consumption and ensure optimal user performance by implementing quotas. You can add and manage quotas using `Aiven Console `_ and `Aiven API `_. + +Using quotas offer several benefits: + +* **Resource management:** Quotas prevent individual clients from consuming excessive resources, thus ensuring fairness in resource allocation. +* **Stability:** Setting limits on network throughput and CPU usage helps maintain stability and prevent performance degradation of the Apache Kafka cluster. +* **Efficiency:** Quotas enable you to optimize resource utilization and achieve better overall efficiency within your Kafka deployment. + + + +Supported quota types +----------------------- + +Aiven for Apache Kafka provides different quotas to help you manage resources effectively. These quotas offer benefits in controlling network bandwidth and CPU usage: + +* **Consumer throttle (Network bandwidth quota):** This quota allows you to limit the amount of data a consumer can retrieve from the Kafka cluster per second. Setting a maximum network throughput prevents any single consumer from using excessive network bandwidth. 
+* **Producer throttle (Network bandwidth quota):** Similar to the consumer throttle, this quota limits the amount of data a producer can send to the Kafka cluster per second. It ensures that producers do not overload the system by sending excessive data, thereby maintaining system stability. +* **CPU throttle:** This quota is about managing CPU usage. You can manage CPU usage by setting a percentage of the total CPU time. Limiting the CPU resources for specific client IDs or users prevents any individual from monopolizing CPU resources, promoting fairness and efficient resource utilization. + + +Client ID and users in quotas +-------------------------------- +**Client ID** and **User** are two types of entities that can be used to enforce quotas in Kafka. + +**Client ID** + A Client ID is a unique identifier assigned to each client application or producer/consumer instance that connects to a Kafka cluster. It helps track the activity and resource usage of individual clients. When configuring quotas, you can set limits based on the Client ID, allowing you to control the amount of resources (such as network bandwidth or CPU) a specific client can utilize. + +**Users** + A User represents the authenticated identity of a client connecting to a cluster. With authentication mechanisms like SASL, users are associated with specific connections. By setting quotas based on Users, resource limits can be enforced per-user. + +Quotas enforcement +------------------- +Quotas enforcement ensures clients stay within their allocated resources. These quotas are implemented and controlled by the brokers on an individual basis. Each client group is assigned a specific quota for every broker, and when this threshold is reached, throttling mechanisms come into action. + +When a client exceeds its quota, the broker calculates the necessary delay to bring the client back within its allocated limits. Subsequently, the broker promptly responds to the client, indicating the duration of the delay. Additionally, the broker suspends communication with the client during this delay period. This cooperative approach from both sides ensures the effective enforcement of quotas. + +Quota violations are swiftly detected using short measurement windows, typically 30 windows of 1 second each. This ensures timely correction and prevents bursts of traffic followed by long delays, providing a better user experience. + +For more information, refer to `Enforcement `_ in the Apache Kafka® official documentation. + +.. seealso:: + * :doc:`How to add and manage quotas <../howto/manage-quotas>` diff --git a/docs/products/kafka/concepts/monitor-consumer-group.rst b/docs/products/kafka/concepts/monitor-consumer-group.rst new file mode 100644 index 0000000000..0d4d0c3f34 --- /dev/null +++ b/docs/products/kafka/concepts/monitor-consumer-group.rst @@ -0,0 +1,55 @@ +Monitoring consumer groups in Aiven for Apache Kafka® +====================================================== + +With Aiven for Apache Kafka® dashboards and telemetry, you can monitor the performance and system resources of your Aiven for Apache Kafka service. Aiven provides pre-built dashboards and telemetry for your service, allowing you to collect and visualize telemetry data using InfluxDB® and Grafana®. Aiven streamlines the process by automatically configuring the dashboards for each of your Aiven for Apache Kafka instances. 
+ +This section builds on the :doc:`service integrations ` documentation and provides an in-depth look at consumer group graphs and related key terminology in Aiven for Apache Kafka®. Consumer group graphs offer valuable insights into the behavior of Apache Kafka consumers, which is crucial for maintaining a continuously running production Kafka system. + + +Topics +--------- +In Apache Kafka®, a topic serves as a unique channel for discussions. Producers send messages to the topic while consumers read those messages. For instance, in a topic named `soccer`, you can read what others say about soccer (acting as a consumer) or post messages about soccer (acting as a producer). + +Topic partitions +----------------- +The storage of messages for an Apache Kafka® topic can be spread across one or more topic partitions. For instance, in a topic that has 100 messages and is set up to have 5 partitions, 20 messages would be assigned to each partition. + +Consumer groups +---------------- + +Apache Kafka® allows multiple consumers to read messages from a Kafka topic. This improves the message consumption rate and overall performance. Organizing consumers into consumer groups identified by a group ID is common practice. Consumer groups consume messages from a topic with messages spread across multiple partitions. Apache Kafka ensures that each message is consumed by only one consumer, which is essential for certain classes of business applications. + +For example, with a topic having 100 messages across five partitions and five consumers in a consumer group, each consumer will be allocated a distinct partition, consuming 20 messages each. + +If the number of consumers exceeds the number of partitions, extra consumers remain idle until an active consumer exits. Also, a consumer cannot read from a partition not assigned to it. + + +Consumer group telemetry +------------------------- +Aiven for Apache Kafka provides built-in consumer group graphs that offer valuable telemetry to monitor and manage consumer groups effectively. + +Consumer group graph: consumer group replication lag +``````````````````````````````````````````````````````` +Consumer group lag is an important metric in your Apache Kafka dashboard. It shows how far behind the consumers in a group are in consuming messages on the topic. A significant lag could indicate one of two scenarios - terminated consumers or consumers who are alive but unable to keep up with the rate of incoming messages. Persistent lag for long durations may indicate that the system is not behaving according to plan, requiring investigation and follow-up actions to resolve the issue. + +The terms ``Consumer group lag`` and ``Consumer group replication lag`` can be used interchangeably. Consumer Group Lag is typically a metric provided by the client side, while Aiven computes its metric known as Consumer Group Replication Lag (``kafka_consumer_group_rep_lag``) by fetching information about partitions and consumer groups from broker side. This metric captures the difference between the latest published offset (high watermark) and the consumer group offset for the same partition. + +The consumer group graph below, which is enabled by default, provides valuable insights into consumer behavior. It displays the consumer group replication lag, indicating how far behind the consumers are in consuming messages from a topic. This graph provides information about consumer behavior, enabling you to take appropriate action if necessary. + + +.. 
image:: /images/products/kafka/consumer-group-graphs-for-kafka-dashboards.png + :alt: Image of consumer group replication lag + + +Consumer group offset telemetry +````````````````````````````````` +In Apache Kafka, messages are written into a partition as append-only logs and each message is assigned a unique incremental number called the offset. These offsets indicate the exact position of messages within the partition. + +Aiven for Apache Kafka provides offset telemetry, which can help understand message consumption patterns and troubleshoot issues. The ``kafka_consumer_group_offset`` metric identifies the consumer group's most recent committed offset, which can be used to determine its relative position within the assigned partitions. + + + + + + + diff --git a/docs/products/kafka/howto/kafka-custom-serde-encrypt.rst b/docs/products/kafka/howto/kafka-custom-serde-encrypt.rst index b41f845fab..af3a1c06b8 100644 --- a/docs/products/kafka/howto/kafka-custom-serde-encrypt.rst +++ b/docs/products/kafka/howto/kafka-custom-serde-encrypt.rst @@ -7,7 +7,7 @@ With the Aiven platform there are several deployment models available to meet yo - Enhanced Compliance Environments (ECE) to satisfy additional compliance needs such as HIPPA and PCI-DSS -- Bring your own account (BYOA) which allows deployment of Aiven services directly into your cloud account +- Bring your own cloud (BYOC) which allows deployment of Aiven services directly into your cloud account In addition to the above, all data transmitted to the Aiven services is encrypted in transit and at rest. diff --git a/docs/products/kafka/howto/manage-quotas.rst b/docs/products/kafka/howto/manage-quotas.rst new file mode 100644 index 0000000000..60fc513755 --- /dev/null +++ b/docs/products/kafka/howto/manage-quotas.rst @@ -0,0 +1,51 @@ +Manage quotas +============== +This section provides you with information on how to add and manage quotas for your Aiven for Apache Kafka® service using the `Aiven Console `_. + +For an overview of quotas, see :doc:`Quotas in Aiven for Apache Kafka <../concepts/kafka-quotas>` section for more information. + +.. note:: + To add quotas using APIs, see `Aiven API documentation `_. + +Add quota +------------ + +To add quota to your Aiven for Apache Kafka service, follow these steps: + +1. Log in to `Aiven Console `_ and select the Aiven for Apache Kafka service you want to manage. +2. Select the Quotas tab and click **Add quota**. +3. Enter the **Client ID** or **User** for which you want to set the quota. The *Client ID* represents a unique identifier assigned to a Kafka client, while the *User* refers to the user or user group associated with the client. +4. Choose one of the following quota types and enter the desired value for the selected quota type: + + * **Consumer throttle** (quota limit in bytes per second): Specify the maximum data transfer rate allowed for the consumer. + * **Producer throttle** (quota limit in bytes per second): Specify the maximum data transfer rate allowed for the producer. + * **CPU throttle** (quota limit as a percentage): Specify the maximum CPU usage allowed for the client. + + .. note:: + + Aiven also supports **default** quotas, which can be applied to all clients and/or users by using the keyword **default** in either the client ID or user field. + +5. Select **Add** to add quota. + +Additionally, you can add more quotas by selecting the **Add quota** option on the right-side. + +Update quota +-------------- + +To update an existing quota, follow these steps: + +1. 
Access the **Quotas** tab within the Aiven Console for your Apache Kafka service. +2. Locate the quota you want to update. +3. From the ellipsis menu, select **Update** to open the **Update quota** screen. +4. Modify the quota value as needed. +5. Select **Save changes** to save the changes and update the quota. + +Delete quota +--------------- +To remove a quota, follow these steps: + +1. Access the **Quotas** tab within the Aiven Console for your Apache Kafka service. +2. Locate the quota you want to delete. +3. From the ellipsis menu, select **Delete**. +4. On the confirmation dialog, select **Delete quota** to delete the quota. + diff --git a/docs/products/kafka/kafka-connect/howto/debezium-source-connector-pg.rst b/docs/products/kafka/kafka-connect/howto/debezium-source-connector-pg.rst index e76a20c41c..7623e65e84 100644 --- a/docs/products/kafka/kafka-connect/howto/debezium-source-connector-pg.rst +++ b/docs/products/kafka/kafka-connect/howto/debezium-source-connector-pg.rst @@ -134,10 +134,21 @@ When creating a Debezium source connector pointing to Aiven for PostgreSQL using Caused by: org.postgresql.util.PSQLException: ERROR: must be superuser to create FOR ALL TABLES publication -The error is due to Debezium trying to create a publication and failing because ``avnadmin`` is not a superuser. To avoid the problem you either: +The error is due to Debezium trying to create a publication and failing because ``avnadmin`` is not a superuser. There are two ways of working around this issue: -* add the `"publication.autocreate.mode": "filtered"` parameter to the Debezium connector configuration to enable the publication creation only for the tables defined in the `table.include.list` parameter -* create the publication on the source database before configuring the connector as defined in the section below. +* either add the ``"publication.autocreate.mode": "filtered"`` parameter to the Debezium connector configuration to enable the publication creation only for the tables defined in the ``table.include.list`` parameter +* or create the publication on the source database before configuring the connector as defined in the section further below. + +Note that older versions of Debezium had a bug that prevented adding more tables to the filter in ``filtered`` mode. As a result, this configuration did not conflict with a publication defined ``FOR ALL TABLES``. Starting with Debezium 1.9.7, these configurations conflict and you can get the following error: + +:: + + Caused by: org.postgresql.util.PSQLException: ERROR: publication "dbz_publication" is defined as FOR ALL TABLES + Detail: Tables cannot be added to or dropped from FOR ALL TABLES publications. + +The error is due to Debezium attempting to include more tables in the publication, which is incompatible with ``FOR ALL TABLES``. + +You can get rid of this error by removing the ``publication.autocreate.mode`` configuration, which then defaults to ``all_tables``. If you want to keep the ``filtered`` mode, recreate the publication accordingly, as well as the replication slot. 
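+
+For illustration, the following is a minimal sketch of that cleanup, assuming the connector is stopped first, the publication uses the default name ``dbz_publication``, and the replication slot uses Debezium's default name ``debezium`` (adjust both names to match your connector configuration):
+
+.. code-block:: sql
+
+   -- Drop the publication that was created FOR ALL TABLES
+   DROP PUBLICATION IF EXISTS dbz_publication;
+
+   -- Drop the stale replication slot; this only succeeds while the slot is inactive
+   SELECT pg_drop_replication_slot('debezium');
+
+Once the connector is restarted with ``"publication.autocreate.mode": "filtered"``, it can recreate both objects for the tables listed in ``table.include.list``.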
Create the publication in PostgreSQL '''''''''''''''''''''''''''''''''''' diff --git a/docs/products/kafka/kafka-connect/howto/mqtt-source-connector.rst b/docs/products/kafka/kafka-connect/howto/mqtt-source-connector.rst index aea206f208..da665ba1d8 100644 --- a/docs/products/kafka/kafka-connect/howto/mqtt-source-connector.rst +++ b/docs/products/kafka/kafka-connect/howto/mqtt-source-connector.rst @@ -1,5 +1,5 @@ -Create a sink connector from Apache Kafka® to MQTT -================================================== +Create a source connector from Apache Kafka® to MQTT +======================================================= The `MQTT source connector `_ copies messages from the MQTT topic into Apache Kafka® where they can be transformed and read by multiple consumers. Then, the Stream Reactor MQTT source connector creates a queue and binds it to the ``amq.topic`` defined in the KCQL statement, then messages are copied to the Apache Kafka® service. diff --git a/docs/products/kafka/kafka-mirrormaker/howto/integrate-external-kafka-cluster.rst b/docs/products/kafka/kafka-mirrormaker/howto/integrate-external-kafka-cluster.rst index de09ef298b..50e471e29e 100644 --- a/docs/products/kafka/kafka-mirrormaker/howto/integrate-external-kafka-cluster.rst +++ b/docs/products/kafka/kafka-mirrormaker/howto/integrate-external-kafka-cluster.rst @@ -17,3 +17,5 @@ An external Apache Kafka® service integration endpoint can be defined in the `A 4. Fill the **Endpoint name**, **Bootstrap servers** and the security settings and click **Create**. 5. The external Apache Kafka cluster is now available under the alias defined in the **Endpoint name** parameter + +.. note:: Configure the ACLs for both the source and target cluster such that the MirrorMaker 2 service can describe and create topics, as well as produce and consume messages. \ No newline at end of file diff --git a/docs/products/mysql/howto.rst b/docs/products/mysql/howto.rst index e862d851a3..6b450fc908 100644 --- a/docs/products/mysql/howto.rst +++ b/docs/products/mysql/howto.rst @@ -20,8 +20,8 @@ Aiven for MySQL® how-tos .. dropdown:: Data migration - :doc:`Perform a pre-migration check ` - - :doc:`Migrate to Aiven from an external MySQL with CLI ` - - :doc:`Migrate MySQL databases using Aiven Console ` + - :doc:`Migrate to Aiven from an external MySQL® with CLI ` + - :doc:`Migrate MySQL® databases using Aiven Console ` .. dropdown:: Disk space management @@ -31,11 +31,11 @@ Aiven for MySQL® how-tos .. 
dropdown:: Cluster management - - :doc:`Monitor a managed Aiven for ClickHouse® service ` - - :doc:`Resize a managed Aiven for ClickHouse® service ` + - :doc:`Monitor a managed Aiven for MySQL® service ` + - :doc:`Resize a managed Aiven for MySQL® service ` - :doc:`Schedule automatic maintenance updates ` - - :doc:`Upgrade a managed Aiven for ClickHouse® service ` - - :doc:`Tag a managed Aiven for ClickHouse® service ` - - :doc:`Power-off and delete a managed Aiven for ClickHouse® service ` - - :doc:`Migrate a managed Aiven for ClickHouse® service ` - - :doc:`Fork a managed Aiven for ClickHouse® service ` + - :doc:`Upgrade a managed Aiven for MySQL® service ` + - :doc:`Tag a managed Aiven for MySQL® service ` + - :doc:`Power-off and delete a managed Aiven for MySQL® service ` + - :doc:`Migrate a managed Aiven for MySQL® service ` + - :doc:`Fork a managed Aiven for MySQL® service ` diff --git a/docs/products/mysql/howto/migrate-db-to-aiven-via-console.rst b/docs/products/mysql/howto/migrate-db-to-aiven-via-console.rst index b8ee7ffee4..494c164d7e 100644 --- a/docs/products/mysql/howto/migrate-db-to-aiven-via-console.rst +++ b/docs/products/mysql/howto/migrate-db-to-aiven-via-console.rst @@ -156,7 +156,6 @@ Step 2 - validation * Hostname * Port - * Database name * Username * Password @@ -166,7 +165,9 @@ Step 2 - validation 2. Select the **SSL encryption recommended** checkbox. -3. Select **Run checks** to have the connection validated. +3. In the **Exclude databases** field, enter names of databases that you don't want to migrate (if any). + +4. Select **Run checks** to have the connection validated. .. topic:: Unable to use logical replication? @@ -186,32 +187,32 @@ Step 4 - replicating .. _stop-migration-mysql: -1. While the migration is in progress, you can +While the migration is in progress, you can - * Let it proceed until completed by selecting **Close window**, which closes the wizard. You come back to check the status at any time. +* Let it proceed until completed by selecting **Close window**, which closes the wizard. You come back to check the status at any time. - * Discontinue the migration by selecting **Stop migration**, which retains the data already migrated. For information on how to follow up on a stopped migration process, see :ref:`Start over `. +* Discontinue the migration by selecting **Stop migration**, which retains the data already migrated. For information on how to follow up on a stopped migration process, see :ref:`Start over `. - .. image:: /images/products/mysql/migration-in-progress-mysql.png - :width: 700px - :alt: Set up migration +.. image:: /images/products/mysql/migration-in-progress-mysql.png + :width: 700px + :alt: Set up migration - .. warning:: +.. warning:: - To avoid conflicts and replication issues while the migration is ongoing + To avoid conflicts and replication issues while the migration is ongoing - * Do not write to any tables in the target database that are being processed by the migration tool. - * Do not change the replication configuration of the source database manually. Don't modify ``wal_level`` or reduce ``max_replication_slots``. - * Do not make database changes that could disrupt or prevent the connection between the source database and the target database. Do not change the source database's listen address and do not modify or enable firewalls on the databases. + * Do not write to any tables in the target database that are being processed by the migration tool. 
+ * Do not change the replication configuration of the source database manually. Don't modify ``wal_level`` or reduce ``max_replication_slots``. + * Do not make database changes that could disrupt or prevent the connection between the source database and the target database. Do not change the source database's listen address and do not modify or enable firewalls on the databases. .. topic:: Migration attempt failed? If you happen to get such a notification, investigate potential causes of the failure and try to fix the issues. When you're ready, trigger the migration again by selecting **Start over**. -1. When the wizard communicates the completion of the migration, select one of the following: +When the wizard communicates the completion of the migration, select one of the following: - * **Close connection** to disconnect the databases and stop the replication process if still active. - * **Keep replicating** if the replication is still ongoing and you want to keep the connection open for data synchronization. +* **Close connection** to disconnect the databases and stop the replication process if still active. +* **Keep replicating** if the replication is still ongoing and you want to keep the connection open for data synchronization. .. topic:: Replication mode active? diff --git a/docs/products/opensearch/concepts/backups.rst b/docs/products/opensearch/concepts/backups.rst index 6436a1ebe3..ceb210bcad 100644 --- a/docs/products/opensearch/concepts/backups.rst +++ b/docs/products/opensearch/concepts/backups.rst @@ -1,7 +1,7 @@ .. _opensearch-backup: -Backups -======= +Aiven for OpenSearch® backups +============================= Aiven for OpenSearch® databases are automatically backed up, :doc:`encrypted `, and stored securely in object storage. Backups are stored in the same region as the main service nodes. diff --git a/docs/products/opensearch/howto/list-opensearch-security.rst b/docs/products/opensearch/howto/list-opensearch-security.rst index 628064474e..a4ec214e60 100644 --- a/docs/products/opensearch/howto/list-opensearch-security.rst +++ b/docs/products/opensearch/howto/list-opensearch-security.rst @@ -1,5 +1,5 @@ -OpenSearch® Security management in Aiven for OpenSearch® -========================================================= +OpenSearch® Security management in Aiven for OpenSearch® |beta| +================================================================ Using OpenSearch Security can significantly strengthen the security of your service. By enabling and leveraging this feature for your Aiven for OpenSearch service, you gain access to a wide range of advanced functionalities that will allow you to manage security and access control effectively. diff --git a/docs/products/postgresql/concepts/pgvector.rst b/docs/products/postgresql/concepts/pgvector.rst new file mode 100644 index 0000000000..483b773b35 --- /dev/null +++ b/docs/products/postgresql/concepts/pgvector.rst @@ -0,0 +1,69 @@ +pgvector for AI-powered search in Aiven for PostgreSQL® +======================================================= + +In machine learning (ML) models, all data items in a particular data set are mapped into one unified n-dimensional vector space, no matter how big the input data set is. This optimized way of data representation allows for high performance of AI algorithms. Mapping regular data into a vector space requires so called data vectorizing, which is transforming data items into vectors (data structures with at least two components: magnitude and direction). 
On the vectorized data, you can perform AI-powered operations using different instruments, one of them being pgvector. + +Discover the pgvector extension to Aiven for PostgreSQL® and learn how it works. Check why you might need it and what benefits you get from using it. + +About pgvector +-------------- + +pgvector is an open-source vector extension for similarity search. It's available as an extension to your Aiven for PostgreSQL® services. pgvector introduces capabilities to store and search over data of the vector type (ML-generated embeddings). By applying a specific index type when querying a table, the extension enables you to search for a vector's exact or approximate nearest neighbors (data items). + +Vector embeddings +''''''''''''''''' + +In machine learning, real-world objects and concepts (text, images, video, or audio) are represented as a set of continuous numbers residing in a high-dimensional vector space. These numerical representations are called vector embeddings, and the process of transforming data into these numerical representations is called vector embedding. Vector embedding allows ML algorithms to identify semantic and syntactic relationships between data, find patterns, and make predictions. Vector representations have different applications, for example, information retrieval, image classification, sentiment analysis, natural language processing, or similarity search. + +Vector similarity +''''''''''''''''' + +Since AI tools can capture relationships between objects (vector representations) from their vector embeddings, you can also identify similarities between those objects in an easily computable and scalable manner. + +A vector usually represents a data point, and components of the vector correspond to attributes of the data point. +In most cases, vector similarity calculations use distance metrics, for example, by measuring the straight-line distance between two vectors or the cosine of the angle between two vectors. The greater the resulting value of the similarity calculation is, the more similar the vectors are, with 0 as the minimum value and 1 as the maximum value. + +How pgvector works +------------------ + +Enabling pgvector + You enable the extension on your database. +Vectorizing data + You generate embeddings for your data, for example, for a products catalog using tools such as the `OpenAI API `_ client. +Storing embeddings + You store the embeddings in Aiven for PostgreSQL using the pgvector extension. +Querying embeddings + You use the embeddings for the vector similarity search on the products catalog. +Adding indices + By default, pgvector executes the *exact* nearest neighbor search, which gives perfect recall. If you add an index to use the *approximate* nearest neighbor search, you can speed up your search, trading off some recall for performance. + +Why use pgvector +---------------- + +With the pgvector extension, you can perform the vector similarity search and use embedding techniques directly in Aiven for PostgreSQL. pgvector allows for efficient handling of high-dimensional vector data within the Aiven for PostgreSQL database for tasks such as similarity search, model training, data augmentation, or machine learning. + +pgvector helps you optimize and personalize the similarity search experience by improving searching speed and accuracy (also by adding indices). 
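+
+As a quick illustration, the following is a minimal sketch of a similarity search query, assuming a hypothetical ``products`` table whose ``embedding`` column uses the ``vector`` type and already contains generated embeddings:
+
+.. code-block:: sql
+
+   -- Return the five products whose embeddings are closest to the query
+   -- embedding, ranked by cosine distance (the <=> operator)
+   SELECT id, name
+   FROM products
+   ORDER BY embedding <=> '[0.12,0.78,0.33]'
+   LIMIT 5;
+
+The how-to article linked in the *What's next* section below walks through enabling the extension and the full set of distance operators.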
+ +Typical use cases +----------------- + +There are multiple industry applications for similarity searches over vector embeddings: + +* e-commerce +* recommendation systems +* fraud detection + +.. topic:: Examples + + * AI-powered tools can find similarities between products or transactions, which can be used to produce product recommendations or detect potential scams or frauds. + * Sentiment analysis: words represented with similar vector embeddings have similar sentiment scores. + +What's next +----------- + +:doc:`Enable and use pgvector on Aiven for PostgreSQL® ` + +Related reading +--------------- + +`pgvector README on GitHub `_ diff --git a/docs/products/postgresql/concepts/upgrade-failover.rst b/docs/products/postgresql/concepts/upgrade-failover.rst index ee47f99107..cac7bd60d3 100644 --- a/docs/products/postgresql/concepts/upgrade-failover.rst +++ b/docs/products/postgresql/concepts/upgrade-failover.rst @@ -58,6 +58,9 @@ During maintenance updates, cloud migrations, or plan changes, the below procedu .. Note:: The old primary server is kept alive for a short period of time (minimum 60 seconds) with a TCP forwarding setup pointing to the new primary server allowing clients to connect before learning the new IP address. +.. Note:: + If the service plan is changed from a business plan that has two nodes to a startup plan which only has one node of the same tier (for example, business-8 to startup-8), the standby node is removed while the primary node is retained, and connections to the primary are not affected by the downgrade. Similarly, upgrading the service plan from a startup one to a business one adds a standby node to the service cluster, and connections to the primary node are unaffected. + Recreation of replication slots ------------------------------- diff --git a/docs/products/postgresql/howto.rst b/docs/products/postgresql/howto.rst index 3b963bac71..c7ed4df4f3 100644 --- a/docs/products/postgresql/howto.rst +++ b/docs/products/postgresql/howto.rst @@ -38,6 +38,7 @@ Aiven for PostgreSQL® how-tos - :doc:`Optimize PostgreSQL® slow queries ` - :doc:`Check and avoid transaction ID wraparound ` - :doc:`Prevent PostgreSQL® full disk issues ` + - :doc:`Enable and use pgvector on Aiven for PostgreSQL® ` .. dropdown:: Migration diff --git a/docs/products/postgresql/howto/datasource-integration.rst b/docs/products/postgresql/howto/datasource-integration.rst index 8ef21344e0..8582f8860a 100644 --- a/docs/products/postgresql/howto/datasource-integration.rst +++ b/docs/products/postgresql/howto/datasource-integration.rst @@ -1,14 +1,14 @@ Connect two PostgreSQL® services via datasource integration =========================================================== -There are two types of datasource integrations you can use with Aiven for PostgreSQL®: :doc:`Aiven for Grafana® `, and another Aiven for PostgreSQL® service. If you are connecting two PostgreSQL® services together, perhaps to :doc:`query across them `, but still want to have a restricted IP allow-list, then you will need to use the ``Allow IP-List`` service integration. +There are two types of datasource integrations you can use with Aiven for PostgreSQL®: :doc:`Aiven for Grafana® `, and another Aiven for PostgreSQL® service. If you are connecting two PostgreSQL® services together, perhaps to :doc:`query across them `, but still want to have a restricted IP allow-list, then you will need to use the ``IP Allow-List`` service integration. -Whenever a service node needs to be recycled, e.g. 
for maintenance, a new node is created with a new IP address. As the new IP address cannot be predicted, if you want to maintain a connection between two PostgreSQL services your choices are either to have a very broad IP allow-list (which might be acceptable in the private IP-range of a project VPC) or to use the ``Allow IP-List`` service integration to dynamically create an IP allow-list entry for the other PostgreSQL service. +Whenever a service node needs to be recycled, e.g. for maintenance, a new node is created with a new IP address. As the new IP address cannot be predicted, if you want to maintain a connection between two PostgreSQL services your choices are either to have a very broad IP allow-list (which might be acceptable in the private IP-range of a project VPC) or to use the ``IP Allow-List`` service integration to dynamically create an IP allow-list entry for the other PostgreSQL service. Integrate two PostgreSQL services --------------------------------- -1. On the service overview page for your PostgreSQL service, go to **Manage Integrations** and choose the **Allow IP-List** option. +1. On the service overview page for your PostgreSQL service, go to **Manage Integrations** and choose the **IP Allow-List** option. 2. Choose either a new or existing PostgreSQL service. diff --git a/docs/products/postgresql/howto/list-dba-tasks.rst b/docs/products/postgresql/howto/list-dba-tasks.rst index 646dfdc925..56c167c846 100644 --- a/docs/products/postgresql/howto/list-dba-tasks.rst +++ b/docs/products/postgresql/howto/list-dba-tasks.rst @@ -66,3 +66,7 @@ Database administration tasks .. grid-item-card:: :doc:`Prevent PostgreSQL® full disk issues ` :shadow: md :margin: 2 2 0 0 + + .. grid-item-card:: :doc:`Enable and use pgvector on Aiven for PostgreSQL® ` + :shadow: md + :margin: 2 2 0 0 diff --git a/docs/products/postgresql/howto/report-metrics-grafana.rst b/docs/products/postgresql/howto/report-metrics-grafana.rst index 5802dff5d3..ef99ee4492 100644 --- a/docs/products/postgresql/howto/report-metrics-grafana.rst +++ b/docs/products/postgresql/howto/report-metrics-grafana.rst @@ -4,25 +4,22 @@ Monitor PostgreSQL® metrics with Grafana® As well as offering PostgreSQL-as-a-service, the Aiven platform gives you access to monitor the database. The metrics/dashboard integration in the Aiven console lets you send PostgreSQL® metrics to an external endpoint like Datadog or to create an integration and a :doc:`prebuilt dashboard <../reference/pg-metrics>` in Aiven for Grafana®. Get detailed information about the metrics and dashboard sections in :doc:`../reference/pg-metrics`. -Push PostgreSQL metrics to InfluxDB®, M3DB or PostgreSQL --------------------------------------------------------- +Push PostgreSQL metrics to InfluxDB® or M3DB +-------------------------------------------- To collect metrics about your PostgreSQL service you will need to configure a metrics integration and nominate somewhere to store the collected metrics. -1. On the service overview page for your PostgreSQL service, go to "Manage Integrations" and choose the "Metrics" option with "**Send** service metrics to InfluxDB, M3DB or PostgreSQL service" as its description. +1. On the service overview page for your PostgreSQL service, go to "Manage Integrations" and choose the "Metrics" option with "**Send** service metrics to InfluxDB or M3DB service" as its description. -2. Choose either a new or existing InfluxDB®, M3DB or PostgreSQL service. +2. Choose either a new or existing InfluxDB® or M3DB service. 
- A new service will ask you to select the cloud, region and plan to use. You should also give your service a name. The service overview page shows the nodes rebuilding, and then indicates when they are ready. - - If you're already using InfluxDB, M3DB or PostgreSQL on Aiven, you can submit your PostgreSQL metrics to the existing service. - -.. Warning:: - You can send your PostgreSQL service metrics to the same instance. This is not recommended since it increases the load on the monitored system and could also be affected in the event of problems with the database. + - If you're already using InfluxDB or M3DB on Aiven, you can submit your PostgreSQL metrics to the existing service. Provision and configure Grafana ------------------------------- -3. Select the target InfluxDB, M3DB or PostgreSQL database service and go to its service page. Under "Manage Integrations", choose the "Dashboard" option to make the metrics available on that platform. +3. Select the target InfluxDB or M3DB service and go to its service page. Under "Manage Integrations", choose the "Dashboard" option to make the metrics available on that platform. 4. Choose either a new or existing Grafana service. - A new service will ask you to select the cloud, region and plan to use. You should also give your service a name. The service overview page shows the nodes rebuilding, and then indicates when they are ready. diff --git a/docs/products/postgresql/howto/use-pgvector.rst b/docs/products/postgresql/howto/use-pgvector.rst new file mode 100644 index 0000000000..0cdea7446c --- /dev/null +++ b/docs/products/postgresql/howto/use-pgvector.rst @@ -0,0 +1,107 @@ +Enable and use pgvector on Aiven for PostgreSQL® +================================================ + +This article provides step-by-step instructions on enabling, using, and disabling the pgvector extension for your Aiven for PostgreSQL service. + +About using pgvector +-------------------- + +The pgvector extension allows you to perform the vector similarity search and use embedding techniques directly in Aiven for PostgreSQL. See :doc:`pgvector for AI-powered search ` for more information on what pgvector is and how it works. + +Prerequisites +------------- + +* Aiven account +* Aiven for PostgreSQL service running on PostgreSQL 13 or newer PostgreSQL versions +* psql and a psql CLI client +* Vector embeddings generated (for example, with the `OpenAI API `_ client) + +Enable pgvector +--------------- + +Run the CREATE EXTENSION statement from a client such as psql connected to your service. This is needed for each database you want to perform the similarity search on. + +1. :doc:`Connect to your Aiven for PostgreSQL service ` using, for example, the psql client (CLI). +2. Connect to your database where you want to operate. + + .. code-block:: bash + + \c database-name + +3. Run the CREATE EXTENSION statement. + + .. code-block:: bash + + CREATE EXTENSION vector; + +Store embeddings +---------------- + +1. Create a table to store the generated vector embeddings. Use the CREATE TABLE SQL command, adjusting the dimensions as needed. + + .. code-block:: bash + + CREATE TABLE items (id bigserial PRIMARY KEY, embedding vector(3)); + + .. note:: + + As a result, the ``items`` table is created. The table includes the ``embedding`` column, which can store vectors with three dimensions. + +2. Run the INSERT statement to store the embeddings generated with, for example, the `OpenAI API `_ client. + + .. 
code-block:: bash
+
+    INSERT INTO items (embedding) VALUES ('[1,2,3]'), ('[4,5,6]');
+
+    .. note::
+
+        As a result, two new rows are inserted into the ``items`` table with the provided embeddings.
+
+Perform similarity search
+-------------------------
+
+To calculate similarity, run SELECT statements using the built-in vector operators.
+
+.. code-block:: bash
+
+   SELECT * FROM items ORDER BY embedding <-> '[3,1,2]' LIMIT 5;
+
+.. note::
+
+    As a result, the query computes the L2 distance between the selected vector and the vectors stored in the ``items`` table, arranges the results based on the calculated distance, and outputs the top five nearest neighbors (most similar items).
+
+.. topic:: Operators for calculating similarity
+
+   * ``<->`` - Euclidean distance (L2 distance)
+   * ``<#>`` - negative inner product
+   * ``<=>`` - cosine distance
+
+Add indices
+-----------
+
+You can add an index on the vector column to use the *approximate* nearest neighbor search (instead of the default *exact* nearest neighbor search). This can improve query performance at a negligible cost in recall. Adding an index is possible for all distance functions (L2 distance, cosine distance, inner product).
+
+To add an index, run a query similar to the following:
+
+.. code-block:: bash
+
+   CREATE INDEX ON items USING ivfflat (embedding vector_l2_ops) WITH (lists = 100);
+
+.. note::
+
+    As a result, the index is added to the ``embedding`` column for the L2 distance function.
+
+Disable pgvector
+----------------
+
+To disable the pgvector extension and remove it from a database, run the following SQL command:
+
+.. code-block:: bash
+
+   DROP EXTENSION vector;
+
+Related reading
+---------------
+
+* :doc:`pgvector for AI-powered search in Aiven for PostgreSQL® `
+* `pgvector README on GitHub `_
diff --git a/docs/products/postgresql/reference/list-of-extensions.rst b/docs/products/postgresql/reference/list-of-extensions.rst
index d256d4acde..98ff081de9 100644
--- a/docs/products/postgresql/reference/list-of-extensions.rst
+++ b/docs/products/postgresql/reference/list-of-extensions.rst
@@ -39,6 +39,9 @@ Data types
 ``ltree`` - https://www.postgresql.org/docs/current/ltree.html
     Data type for hierarchical tree-like structures.
 
+``pgvector`` - https://github.com/pgvector/pgvector
+    Type for vector similarity search. |PG13onwards|
+
 ``seg`` - https://www.postgresql.org/docs/current/seg.html
     Data type for representing line segments or floating-point intervals.
 
diff --git a/docs/products/postgresql/reference/pg-connection-limits.rst b/docs/products/postgresql/reference/pg-connection-limits.rst
index ba9b24a44f..0028a7bcf2 100644
--- a/docs/products/postgresql/reference/pg-connection-limits.rst
+++ b/docs/products/postgresql/reference/pg-connection-limits.rst
@@ -8,7 +8,7 @@ Aiven for PostgreSQL® instances limit the number of allowed connections to make
    * - Plan
      - Max Connections
 
-   * - Hobbyist
+   * - Hobbyist (Google Cloud, DigitalOcean, and UpCloud only)
     - 25
 
    * - Startup/Business/Premium-4
     - 100
diff --git a/docs/products/redis/concepts/overview.rst b/docs/products/redis/concepts/overview.rst
index 1627afa383..b400cdc179 100644
--- a/docs/products/redis/concepts/overview.rst
+++ b/docs/products/redis/concepts/overview.rst
@@ -19,7 +19,7 @@ Aiven for Redis has many features that make it easy and stress-free to use:
 
 * **Managed service:** Aiven for Redis is fully managed, so you don't have to worry about setup, management, or updates. 
Aiven provides tools and integrations to help you easily use Redis in your data pipelines. -* **Fast and easy deployment:** Aiven for Redis provides production-ready Redis service within a few minutes. You can deploy Redis to the cloud of your choice from 5 public clouds and over 100 regions. Aiven uses high-performance clusters with carefully selected instance types and storage options for top-notch performance. A Bring-your-own-account (BYOA) deployment model is available for strict control requirements. +* **Fast and easy deployment:** Aiven for Redis provides production-ready Redis service within a few minutes. You can deploy Redis to the cloud of your choice from 5 public clouds and over 100 regions. Aiven uses high-performance clusters with carefully selected instance types and storage options for top-notch performance. A Bring-your-own-cloud (BYOC) deployment model is available for strict control requirements. * **Integration with data infrastructure:** Aiven ensures secure network connectivity using VPC peering, PrivateLink, or TransitGateway technologies. Aiven integrates with various observability tooling, including Datadog, Prometheus, and Jolokia, or you can use Aiven's observability tools for improved monitoring and logging. diff --git a/docs/products/redis/howto/configure-acl-permissions.rst b/docs/products/redis/howto/configure-acl-permissions.rst index 7ab7f00a55..804362bc7a 100644 --- a/docs/products/redis/howto/configure-acl-permissions.rst +++ b/docs/products/redis/howto/configure-acl-permissions.rst @@ -1,50 +1,79 @@ Configure ACL permissions in Aiven for Redis®* ============================================== -Use the Aiven console or the Aiven client to create custom Access Control Lists (ACLs). +Redis®* uses `Access Control Lists (ACLs) `_ to restrict the usage of commands and keys based on specific username and password combinations. In Aiven for Redis®*, the direct use of `ACL * `_ commands is not allowed to maintain the reliability of replication, configuration management, and disaster recovery backups for the default user. However, you have the flexibility to create custom ACLs using either the `Aiven Console `_ or the :doc:`Aiven CLI `. -Redis®* uses `ACLs `_ to restrict the usage of commands and keys available for connecting for a specific username and password. Aiven for Redis®*, however, does not allow use of the `ACL * `_ commands directly in order to guarantee the reliability of replication, configuration management, or backups for disaster recovery for the default user. You can use the console or the client to create custom ACLs instead. +With the Aiven Console or Aiven CLI, you can customize ACL permissions to align with your requirements. This gives you granular control over access and ensures optimal security within your Aiven for Redis®* service. -Create an ACL using the Aiven console -------------------------------------- +Create user and configure ACLs using console +----------------------------------------------- +Follow the steps below to create a Redis user and configure ACLs: -1. Log in to the `Aiven web console `_. +1. Log in to `Aiven Console `_ and select your Aiven for Redis service from the list of available services. +2. From the **Overview** page of your Redis service, navigate to the **Users** tab. +3. Select **Create user**, and provide the following details: + + * **Username:** Specify a username for the user. + * **Categories:** Specify the command categories the user can access within Aiven for Redis. 
For example, you can use the prefix ``+@all`` or a similar convention to grant users access to all categories. Separate each category entry with a single space. + * **Commands:** Specify the commands the user can execute, separating each command by a single space. For example, you can enter ``+set -get`` to grant the user permission to execute the SET command and deny access to the GET command. + * **Channels:** Specify the channels the user can access within the Publish/Subscribe (Pub/Sub) messaging pattern. Separate each channel entry with a single space. + * **Keys:** Specify the keys the user can interact with. For example, you can specify keys like ``user:123`` or ``product:456``, or ``order:789`` to grant the user access to interact with these specific keys in Aiven for Redis. + +4. Once you have defined the ACL permissions for the user, select **Save** to create the user. -2. From the *Services* page, select the Redis service you want to create an ACL for. - The *Overview* page for the service opens. +User management +---------------- +You have various management options available for Aiven for Redis users. Follow the instructions below for each operation: -3. Click the **Users and ACL**. +Reset password +````````````````` +1. In the **Users** tab, locate the user you want to reset the password and select the ellipses next to their row. +2. Select **Reset password** from the drop-down menu. +3. Confirm the password reset by selecting **Reset** on the confirmation screen. -4. Click **+ Add Service User**. +Edit ACL rules +``````````````` +1. In the **Users** tab, locate the user you want to edit ACL rules and select the ellipses next to their row. +2. Select **Edit ACL rules** from the drop-down menu. +3. Make the desired changes to the ACL rules on the **Edit access control** screen. +4. Select the **Save** to apply the modifications. - The *New Redis User* pop-up opens. +Duplicate user +``````````````` +1. In the **Users** tab, locate the user you want to duplicate and select the icon next to their row. +2. Select **Duplicate user** from the options in the drop-down menu. +3. Enter a name for the new user in the **Duplicate user** screen. +4. Click on the **Add user** button to create a duplicate user. -5. Create a user, and define which **Keys**, **Categories**, **Commands** or **Channels** the user can access. +Delete user +````````````` +1. Locate the user you want to delete from the user list and select the icon next to their row. +2. Select **Delete** from the options in the drop-down menu. +3. Confirm the deletion by selecting **Delete** on the confirmation screen. - In this example, the ``test`` user can only retrieve keys with the pattern ``mykeys.*``. - .. image:: /images/products/redis/redis-acl.png - :alt: Screenshot of the ACL configuration screen +Create user and configure ACLs using Aiven CLI +----------------------------------------------- -6. Click **Save**. +To create a user and configure ACLs using the Aiven CLI, follow these steps: +1. Set up the :doc:`CLI tool `. -Create an ACL using the Aiven CLI ---------------------------------- +2. Create a user named ``mynewuser`` with read-only access to the ``mykeys.*`` keys using the following command: -1. Set up the :doc:`CLI tool ` if you don't have it already. + :: -2. 
Create a user for ``mynewuser`` with read-only access to the ``mykeys.*`` keys:: + avn service user-create --project myproject myservicename --username mynewuser --redis-acl-keys 'mykeys.*' --redis-acl-commands '+get' --redis-acl-categories '' - avn service user-create --project myproject myservicename --username mynewuser --redis-acl-keys 'mykeys.*' --redis-acl-commands '+get' --redis-acl-categories '' +3. Confirm the ACL is applied by connecting to the service using the new username and password: + + :: -3. Confirm the ACL is applied by connecting to the service using the new username and password:: + redis-cli --user mynewuser --pass ... --tls -h myservice-myproject.aivencloud.com -p 12719 - redis-cli --user mynewuser --pass ... --tls -h myservice-myproject.aivencloud.com -p 12719 - - myservice-myproject.aivencloud.com:12719> get mykeys.hello - (nil) - myservice-myproject.aivencloud.com:12719> set mykeys.hello world - (error) NOPERM this user has no permissions to run the 'set' command or its subcommand + myservice-myproject.aivencloud.com:12719> get mykeys.hello + (nil) + myservice-myproject.aivencloud.com:12719> set mykeys.hello world + (error) NOPERM this user has no permissions to run the 'set' command or its subcommand diff --git a/docs/tools/terraform/howto/update-deprecated-resources.rst b/docs/tools/terraform/howto/update-deprecated-resources.rst new file mode 100644 index 0000000000..ae4f3707f3 --- /dev/null +++ b/docs/tools/terraform/howto/update-deprecated-resources.rst @@ -0,0 +1,49 @@ +Update deprecated resources +============================ + +Use the following steps to migrate from resources that have been deprecated or renamed without destroying existing resources. + +.. tip:: + Backup your Terraform state file ``terraform.tfstate`` to use in the case of a rollback. + +In the following example, the ``aiven_database`` field is migrated to the new ``aiven_pg_database`` field for an Aiven for PostgreSQL® service. + +1. Replace references to the deprecated field with the new field. In the following file ``aiven_database`` was replaced with ``aiven_pg_database``: + +.. code:: + + - resource "aiven_database" "mydatabase" { + project = aiven_project.myproject.project + service_name = aiven_pg.mypg.service_name + database_name = "" + } + + + + resource "aiven_pg_database" "mydatabase" { + project = aiven_project.myproject.project + service_name = aiven_pg.mypg.service_name + database_name = "" + } + +2. View a list of all resources in the state file:: + + terraform state list + +3. Remove the resource from the control of Terraform:: + + terraform state rm + +.. tip:: + Use the ``-dry-run`` flag to preview the changes without applying them. + +4. Add the resource back to Terraform by importing it as a new resource:: + + terraform import project_name/service_name/db_name + +5. Check that the import is going to run as you expect:: + + terraform plan + +6. Apply the new configuration:: + + terraform apply diff --git a/docs/tools/terraform/howto/vpc-peering-aws.rst b/docs/tools/terraform/howto/vpc-peering-aws.rst new file mode 100644 index 0000000000..03d7db15ce --- /dev/null +++ b/docs/tools/terraform/howto/vpc-peering-aws.rst @@ -0,0 +1,178 @@ +AWS virtual network peering +============================= + +This help article provides step-by-step instructions for setting up a VPC peering connection between Aiven and Amazon Web Services Platform (AWS) using Terraform. See the `Using VPC +peering `__ +article for how to set up a Project VPC. 
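+
+The sections below create three Terraform files: ``variables.tf``, ``provider.tf``, and ``project.tf``. One way to supply values for the input variables declared in ``variables.tf`` is a ``terraform.tfvars`` file similar to the following sketch (all values are placeholders):
+
+.. code-block::
+
+   aiven_api_token    = "<AIVEN_AUTHENTICATION_TOKEN>"
+   aws_account_id     = "<AWS_ACCOUNT_ID>"
+   aiven_project_name = "<AIVEN_PROJECT_NAME>"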
+ +Before you start, make sure you have an Aiven authentication token and have set up the AWS CLI. + +Prerequisites: +~~~~~~~~~~~~~~~~ + +* Create an :doc:`Aiven authentication token `. + +* Install the AWS CLI https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html. + +* Configure the AWS CLI https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html. + +Set up the Terraform variables: +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Create a file named ``variables.tf`` and add the following code: + +.. code-block:: + + variable "aiven_api_token" {} + variable "aws_account_id" {} + variable "aiven_project_name" {} + +This file declares the variables for the Aiven API token, Aiven project name and the AWS account ID. + +Configure the Terraform providers: +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Create a file named ``provider.tf`` and add the following code: + +.. code-block:: + + terraform { + required_providers { + aiven = { + source = "aiven/aiven" + version = ">= 4.0.0, < 5.0.0" + } + + aws = { + source = "hashicorp/aws" + version = "~> 5.0" + } + } + } + + provider "aiven" { + api_token = var.aiven_api_token + } + + provider "aws" { + region = "ap-southeast-2" + } + +This code initializes the Aiven and AWS providers, specifying the required provider versions and configurations. It also uses the variables defined in the ``variables.tf`` file + +Create a VPC and subnet in AWS: +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Create a file named project.tf and add the following code: + +.. code-block:: + + # Create a VPC in AWS + resource "aws_vpc" "awsvpc" { + cidr_block = "10.0.0.0/16" + enable_dns_hostnames = true + tags = { + Name = "test-vpc" + } + } + + # Create a subnet in the AWS VPC + resource "aws_subnet" "awssubnet1" { + vpc_id = aws_vpc.awsvpc.id + cidr_block = "10.0.1.0/24" + + tags = { + Name = "test-subnet1" + } + } + + #Get Aiven project details + data "aiven_project" "my_project" { + project = var.aiven_project_name + } + +This code retrieves the details of your Aiven project, creates a VPC in AWS, and creates a subnet within that VPC. + +Create a VPC in Aiven: +~~~~~~~~~~~~~~~~~~~~~~ + +Add the following code to your ``project.tf`` file to create a VPC in Aiven: + +.. code-block:: + + # Create Aiven Project VPC + resource "aiven_project_vpc" "my_vpc" { + project = data.aiven_project.my_project.project + cloud_name = "aws-ap-southeast-2" + network_cidr = "192.168.0.0/24" + } + +This code creates a VPC in your Aiven project. The ``network_cidr`` parameter specifies the CIDR range for the Aiven VPC. Ensure that this CIDR range does not overlap with the CIDR range of your AWS VPC. In this example, the Aiven VPC uses the CIDR range "192.168.0.0/24" + +Create a peering connection between Aiven and AWS: +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Add the following code to your project.tf file to create a peering connection between the Aiven VPC and your AWS VPC: + +.. code-block:: + + # Create a VPC peering from Aiven. + resource "aiven_aws_vpc_peering_connection" "peertoaws" { + vpc_id = aiven_project_vpc.my_vpc.id + aws_account_id = var.aws_account_id + aws_vpc_id = aws_vpc.awsvpc.id + aws_vpc_region = "ap-southeast-2" + depends_on = [ + aiven_project_vpc.my_vpc, aws_vpc.awsvpc + ] + + } + # Accept the VPC peering initiated from Aiven. 
+   resource "aws_vpc_peering_connection_accepter" "peer" {
+     vpc_peering_connection_id = aiven_aws_vpc_peering_connection.peertoaws.aws_vpc_peering_connection_id
+     auto_accept               = true
+
+     tags = {
+       Side = "Accepter"
+     }
+
+     depends_on = [
+       aiven_aws_vpc_peering_connection.peertoaws
+     ]
+   }
+
+   # Route tables should be updated; this is an example routing the Aiven VPC CIDR through the peering connection.
+   resource "aws_route_table" "route_aiven" {
+     vpc_id = aws_vpc.awsvpc.id
+
+     route {
+       cidr_block                = "192.168.0.0/24"
+       vpc_peering_connection_id = aiven_aws_vpc_peering_connection.peertoaws.aws_vpc_peering_connection_id
+     }
+   }
+   # The route table should be associated with the subnets.
+   resource "aws_route_table_association" "subnet1_aiven" {
+     subnet_id      = aws_subnet.awssubnet1.id
+     route_table_id = aws_route_table.route_aiven.id
+   }
+
+This code creates a peering connection between the Aiven VPC and the AWS VPC by using the ``aiven_aws_vpc_peering_connection`` and ``aws_vpc_peering_connection_accepter`` resources. The ``depends_on`` attribute ensures that the required resources exist before the new resource is created. Route tables must also be created or updated to enable routing from the AWS VPC to the Aiven VPC.
+
+Apply the Terraform configuration and verify the VPC peering status:
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Run the following commands to initialize and apply the Terraform configuration:
+
+.. code-block:: console
+
+   terraform init
+   terraform apply
+
+Review the proposed changes and enter ``yes`` when prompted to proceed. Terraform will create the VPC peering connection between Aiven and AWS. After the resources have been created, verify that the VPC peering connection is active by checking the ``state`` attribute of the ``aiven_aws_vpc_peering_connection`` resource. It should change from "PENDING_PEER" to "ACTIVE"; this can take several minutes (10-15). To refresh and display the current status, run the following commands:
+
+.. code-block:: console
+
+   terraform apply
+   terraform show
+
+Look for the ``aiven_aws_vpc_peering_connection`` resource in the output, and confirm that the ``state`` attribute is set to "ACTIVE". This indicates that the VPC peering connection between Aiven and AWS has been successfully established.
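+
+Optionally, you can expose the peering state as a Terraform output so it is printed after every ``terraform apply`` (and by ``terraform output``) without searching through the ``terraform show`` output. A minimal sketch, assuming the ``peertoaws`` resource name used above:
+
+.. code-block::
+
+   output "aiven_aws_peering_state" {
+     value = aiven_aws_vpc_peering_connection.peertoaws.state
+   }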
\ No newline at end of file diff --git a/images/platform/billing/billing_assign_card.png b/images/platform/billing/billing_assign_card.png deleted file mode 100644 index 52205289af..0000000000 Binary files a/images/platform/billing/billing_assign_card.png and /dev/null differ diff --git a/images/platform/billing/billing_user_information.png b/images/platform/billing/billing_user_information.png deleted file mode 100644 index c1f3585a50..0000000000 Binary files a/images/platform/billing/billing_user_information.png and /dev/null differ diff --git a/images/platform/byoa-ipsec-ingress-direct.png b/images/platform/byoc-ipsec-ingress-direct.png similarity index 100% rename from images/platform/byoa-ipsec-ingress-direct.png rename to images/platform/byoc-ipsec-ingress-direct.png diff --git a/images/platform/byoa-ipsec-ingress.png b/images/platform/byoc-ipsec-ingress.png similarity index 100% rename from images/platform/byoa-ipsec-ingress.png rename to images/platform/byoc-ipsec-ingress.png diff --git a/images/platform/byoa-standard.png b/images/platform/byoc-standard.png similarity index 100% rename from images/platform/byoa-standard.png rename to images/platform/byoc-standard.png diff --git a/images/platform/concepts/backup_location_preview.png b/images/platform/concepts/backup_location_preview.png new file mode 100644 index 0000000000..07e0ed1bde Binary files /dev/null and b/images/platform/concepts/backup_location_preview.png differ diff --git a/images/platform/howto/aws-marketplace-listing.png b/images/platform/howto/aws-marketplace-listing.png new file mode 100644 index 0000000000..7b20b5efe1 Binary files /dev/null and b/images/platform/howto/aws-marketplace-listing.png differ diff --git a/images/products/kafka/consumer-group-graphs-for-kafka-dashboards.png b/images/products/kafka/consumer-group-graphs-for-kafka-dashboards.png new file mode 100644 index 0000000000..7ae6cd2598 Binary files /dev/null and b/images/products/kafka/consumer-group-graphs-for-kafka-dashboards.png differ diff --git a/images/products/mysql/connect-source-mysql.png b/images/products/mysql/connect-source-mysql.png index 36225a5cd9..9bf8ed1160 100644 Binary files a/images/products/mysql/connect-source-mysql.png and b/images/products/mysql/connect-source-mysql.png differ diff --git a/images/products/mysql/migration-completed-mysql.png b/images/products/mysql/migration-completed-mysql.png index 7a53f197f3..a51a63a2ff 100644 Binary files a/images/products/mysql/migration-completed-mysql.png and b/images/products/mysql/migration-completed-mysql.png differ diff --git a/images/products/mysql/migration-in-progress-mysql.png b/images/products/mysql/migration-in-progress-mysql.png index 62a8b88a1e..c886613845 100644 Binary files a/images/products/mysql/migration-in-progress-mysql.png and b/images/products/mysql/migration-in-progress-mysql.png differ diff --git a/images/products/mysql/ready-to-migrate-mysql.png b/images/products/mysql/ready-to-migrate-mysql.png index 19dbb6575d..d8158a2963 100644 Binary files a/images/products/mysql/ready-to-migrate-mysql.png and b/images/products/mysql/ready-to-migrate-mysql.png differ diff --git a/images/products/mysql/start-migration-mysql.png b/images/products/mysql/start-migration-mysql.png index b54c6e3e96..db1ef97d5c 100644 Binary files a/images/products/mysql/start-migration-mysql.png and b/images/products/mysql/start-migration-mysql.png differ diff --git a/includes/clouds-list.rst b/includes/clouds-list.rst index 4bb2a42adf..7260ff68e7 100644 --- a/includes/clouds-list.rst +++ 
b/includes/clouds-list.rst @@ -26,6 +26,9 @@ Amazon Web Services * - Asia-Pacific - ``aws-ap-south-1`` - Asia, India + * - Asia-Pacific + - ``aws-ap-south-2`` + - Asia, India * - Asia-Pacific - ``aws-ap-southeast-1`` - Asia, Singapore @@ -35,15 +38,24 @@ Amazon Web Services * - Australia - ``aws-ap-southeast-2`` - Australia, New South Wales + * - Australia + - ``aws-ap-southeast-4`` + - Australia, Melbourne * - Europe - ``aws-eu-central-1`` - Europe, Germany + * - Europe + - ``aws-eu-central-2`` + - Europe, Switzerland * - Europe - ``aws-eu-north-1`` - Europe, Sweden * - Europe - ``aws-eu-south-1`` - Europe, Italy + * - Europe + - ``aws-eu-south-2`` + - Europe, Spain * - Europe - ``aws-eu-west-1`` - Europe, Ireland @@ -143,6 +155,9 @@ Azure * - Europe - ``azure-norway-west`` - Europe, Norway + * - Europe + - ``azure-sweden-central`` + - Europe, Gävle * - Europe - ``azure-switzerland-north`` - Europe, Switzerland @@ -155,6 +170,9 @@ Azure * - Europe - ``azure-westeurope`` - Europe, Netherlands + * - Middle East + - ``azure-qatar-central`` + - Middle East, Doha * - Middle East - ``azure-uae-north`` - Middle East, United Arab Emirates @@ -188,6 +206,9 @@ Azure * - North America - ``azure-westus2`` - United States, Washington + * - North America + - ``azure-westus3`` + - United States, Phoenix * - South America - ``azure-brazilsouth`` - South America, Brazil @@ -238,109 +259,109 @@ Google Cloud - Description * - Asia-Pacific - ``google-asia-east1`` - - Taiwan + - Asia, Taiwan * - Asia-Pacific - ``google-asia-east2`` - - Hong Kong + - Asia, Hong Kong * - Asia-Pacific - ``google-asia-northeast1`` - - Tokyo + - Asia, Japan * - Asia-Pacific - ``google-asia-northeast2`` - - Osaka + - Asia, Japan * - Asia-Pacific - ``google-asia-northeast3`` - - Seoul + - Asia, Korea * - Asia-Pacific - ``google-asia-south1`` - - Mumbai + - Asia, India * - Asia-Pacific - ``google-asia-south2`` - - Delhi + - Asia, India * - Asia-Pacific - ``google-asia-southeast1`` - - Singapore + - Asia, Singapore * - Asia-Pacific - ``google-asia-southeast2`` - - Jakarta + - Asia, Indonesia * - Australia - ``google-australia-southeast1`` - - Sydney + - Australia, New South Wales * - Australia - ``google-australia-southeast2`` - - Melbourne + - Australia, Victoria * - Europe - ``google-europe-central2`` - - Warsaw + - Europe, Poland * - Europe - ``google-europe-north1`` - - Finland + - Europe, Finland * - Europe - ``google-europe-southwest1`` - - Madrid + - Europe, Madrid * - Europe - ``google-europe-west1`` - - Belgium + - Europe, Belgium * - Europe - ``google-europe-west2`` - - London + - Europe, England * - Europe - ``google-europe-west3`` - - Frankfurt + - Europe, Germany * - Europe - ``google-europe-west4`` - - Netherlands + - Europe, Netherlands * - Europe - ``google-europe-west6`` - - Zurich + - Europe, Switzerland * - Europe - ``google-europe-west8`` - - Milan + - Europe, Italy * - Europe - ``google-europe-west9`` - - Paris + - Europe, France * - Middle East - ``google-me-west1`` - - Tel Aviv + - Middle East, Israel * - North America - ``google-northamerica-northeast1`` - - Montreal + - Canada, Quebec * - North America - ``google-northamerica-northeast2`` - - Toronto + - Canada, Ontario * - North America - ``google-us-central1`` - - Iowa + - United States, Iowa * - North America - ``google-us-east1`` - - South Carolina + - United States, South Carolina * - North America - ``google-us-east4`` - - N. 
Virginia + - United States, Virginia * - North America - ``google-us-east5`` - - Columbus + - United States, Ohio * - North America - ``google-us-south1`` - - Dallas + - United States, Texas * - North America - ``google-us-west1`` - - Oregon + - United States, Oregon * - North America - ``google-us-west2`` - - Los Angeles + - United States, California * - North America - ``google-us-west3`` - - Salt Lake City + - United States, Utah * - North America - ``google-us-west4`` - - Las Vegas + - United States, Nevada * - South America - ``google-southamerica-east1`` - - Sao Paulo + - South America, Brazil * - South America - ``google-southamerica-west1`` - - Santiago + - South America, Chile UpCloud ----------------------------------------------------- diff --git a/includes/config-cassandra.rst b/includes/config-cassandra.rst index 17e4a02e15..9902e1f044 100644 --- a/includes/config-cassandra.rst +++ b/includes/config-cassandra.rst @@ -117,3 +117,19 @@ +``backup_hour`` +--------------- +*['integer', 'null']* + +**Hour of the day (in UTC) when the backup for the service is started. A new backup is only started if the previous backup has already completed.** + + + +``backup_minute`` +----------------- +*['integer', 'null']* + +**Minute of the hour when the backup for the service is started. A new backup is only started if the previous backup has already completed.** + + + diff --git a/includes/config-kafka.rst b/includes/config-kafka.rst index b64f5634fe..9ee3189596 100644 --- a/includes/config-kafka.rst +++ b/includes/config-kafka.rst @@ -575,6 +575,12 @@ **producer.linger.ms** Wait for up to the given delay to allow batching records together +``producer_max_request_size`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +*integer* + +**producer.max.request.size** The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. + ``consumer_enable_auto_commit`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ *boolean* diff --git a/index.rst b/index.rst index 6017d254a3..09add91bb6 100644 --- a/index.rst +++ b/index.rst @@ -37,7 +37,6 @@ Get started with Aiven's fully-managed services. .. button-link:: docs/products/kafka - :align: right :color: primary :outline: @@ -53,7 +52,6 @@ Get started with Aiven's fully-managed services. Framework for definining powerful transformations of batch and streaming data sets. .. button-link:: docs/products/flink - :align: right :color: primary :outline: @@ -68,7 +66,6 @@ Get started with Aiven's fully-managed services. High performance storage solution for large data quantities. This specialist data solution is a partitioned row store. .. button-link:: docs/products/cassandra - :align: right :color: primary :outline: @@ -84,7 +81,6 @@ Get started with Aiven's fully-managed services. A highly scalable, open source database that uses a column-oriented structure. .. button-link:: docs/products/clickhouse - :align: right :color: primary :outline: @@ -99,7 +95,6 @@ Get started with Aiven's fully-managed services. The visualization tool you need to explore and understand your data. Grafana integrates with the other services in just a few clicks. .. button-link:: docs/products/grafana - :align: right :color: primary :outline: @@ -115,7 +110,6 @@ Get started with Aiven's fully-managed services. Specialist time series database, with good tooling support. .. button-link:: docs/products/influxdb - :align: right :color: primary :outline: @@ -130,7 +124,6 @@ Get started with Aiven's fully-managed services. 
Distributed time-series database for scalable solutions, with M3 Coordinator included, and M3 Aggregator also available. .. button-link:: docs/products/m3db - :align: right :color: primary :outline: @@ -146,7 +139,6 @@ Get started with Aiven's fully-managed services. Popular and much-loved relational database platform. .. button-link:: docs/products/mysql - :align: right :color: primary :outline: @@ -161,7 +153,6 @@ Get started with Aiven's fully-managed services. Document database with specialist search features, bring your freeform documents, logs or metrics, and make sense of them here. .. button-link:: docs/products/opensearch - :align: right :color: primary :outline: @@ -177,7 +168,6 @@ Get started with Aiven's fully-managed services. Powerful relational database platform. We have the latest versions, and an excellent selection of extensions. .. button-link:: docs/products/postgresql - :align: right :color: primary :outline: @@ -192,7 +182,6 @@ Get started with Aiven's fully-managed services. In-memory data store for all your high-peformance short-term storage and caching needs. .. button-link:: docs/products/redis - :align: right :color: primary :outline: @@ -218,7 +207,6 @@ Interfaces Web-based graphical interface for creating and managing your services. .. button-link:: docs/tools/aiven-console - :align: center :color: primary :outline: @@ -233,7 +221,6 @@ Interfaces Command line client for the Aiven platform. .. button-link:: docs/tools/cli - :align: center :color: primary :outline: @@ -253,7 +240,6 @@ Automation A public API you can use for programmatic integrations. .. button-link:: docs/tools/api - :align: right :color: primary :outline: @@ -268,7 +254,6 @@ Automation An infrastructure-as-code tool for lifecycle management of your Aiven resources. .. button-link:: docs/tools/terraform - :align: right :color: primary :outline: @@ -283,7 +268,6 @@ Automation Provision and manage Aiven services from your Kubernetes cluster. .. button-link:: https://docs.aiven.io/docs/tools/kubernetes.html - :align: right :color: primary :outline: diff --git a/override_canonical.py b/override_canonical.py new file mode 100644 index 0000000000..a263c16f5c --- /dev/null +++ b/override_canonical.py @@ -0,0 +1,13 @@ +# The canonical tags of docs.aiven.io are pointing to redirected URL versions +# ending with .html causing a redirect-canonical loop and makes the pages non-indexable. +# This will overwrite the canonical tag defined in furo base.html template +# Note that this method might not work if the Furo theme or Sphinx changes how they handle +# the canonical URL in future releases. 
+def override_canonical(app, pagename, templatename, context, doctree): + final_pagename = pagename + if pagename != 'genindex' and pagename.endswith('index'): + final_pagename = pagename[:-5] # remove 'index' from the pagename + context['pageurl'] = "https://docs.aiven.io/{}".format(final_pagename) + +def setup(app): + app.connect('html-page-context', override_canonical) \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index b38d755b37..5d2efb7705 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,8 +7,9 @@ sphinx-external-toc==0.2.3 sphinx-copybutton==0.5.0 sphinx_gitstamp==0.3.2 beautifulsoup4==4.9.3 +lxml==4.9.2 opensearch-py==1.0.0 -requests==2.25.1 +requests==2.31.0 sphinxext-opengraph==0.4.2 sphinx-sitemap==2.2.0 sphinx-notfound-page==0.8 diff --git a/scripts/postprocess_sitemap.py b/scripts/postprocess_sitemap.py new file mode 100644 index 0000000000..e26434b73a --- /dev/null +++ b/scripts/postprocess_sitemap.py @@ -0,0 +1,25 @@ +from bs4 import BeautifulSoup + +with open('./_build/html/sitemap.xml', 'r') as f: + contents = f.read() + +soup = BeautifulSoup(contents, 'xml') + +urls = soup.find_all('url') + +for url in urls: + loc = url.find('loc') + text = loc.string + # Remove the 'gen' and '404' pages + if '404' in text: + url.decompose() + continue + if text.endswith('genindex.html'): + loc.string = text[:-5] # removes the ".html" + elif text.endswith('index.html'): + loc.string = text[:-10] # removes the "index.html" + elif text.endswith('.html'): + loc.string = text[:-5] # removes the ".html" + +with open('./_build/html/sitemap.xml', 'w') as f: + f.write(str(soup))
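+
+# Illustrative examples (hypothetical URLs) of the rewriting performed above:
+#   https://docs.aiven.io/docs/products/postgresql/howto/use-pgvector.html
+#     -> https://docs.aiven.io/docs/products/postgresql/howto/use-pgvector
+#   https://docs.aiven.io/docs/products/postgresql/howto/index.html
+#     -> https://docs.aiven.io/docs/products/postgresql/howto/
+#   Any <url> entry whose <loc> contains '404' is removed from the sitemap entirely.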