diff --git a/.github/vale/dicts/aiven.dic b/.github/vale/dicts/aiven.dic index 5201106bdb..a3b44e17d4 100644 --- a/.github/vale/dicts/aiven.dic +++ b/.github/vale/dicts/aiven.dic @@ -170,6 +170,7 @@ PgBouncer PGHoard pglookout pgoutput +pgvector plaintext plc PNG/S @@ -251,6 +252,7 @@ untrusted unaggregated UpCloud upsert +vectorizing VM VMs VPC/MS diff --git a/.github/vale/styles/Aiven/capitalization_headings.yml b/.github/vale/styles/Aiven/capitalization_headings.yml index dc23377834..2660de9b45 100644 --- a/.github/vale/styles/Aiven/capitalization_headings.yml +++ b/.github/vale/styles/Aiven/capitalization_headings.yml @@ -15,6 +15,7 @@ exceptions: - Aiven Console - Apache - AWS Transit Gateway + - AWS Marketplace - Auth0 - Azure - Azure Marketplace @@ -93,6 +94,7 @@ exceptions: - Pagila - pgAdmin - PgBouncer + - pgvector - PostgreSQL - Postman - Premium diff --git a/Makefile b/Makefile index 11ed46428e..c5aa585964 100644 --- a/Makefile +++ b/Makefile @@ -21,6 +21,13 @@ help: %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) +# This will overwrite the generated sitemap by sphinx_sitemap to +# exclude index.html or the .html extension in the sitemap. 
This is to prevent +# a redirect loop (an issue for search engines) since Cloudflare Pages redirects all .html to its parent +html: + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + python "$(SOURCEDIR)/scripts/postprocess_sitemap.py" + livehtml: sphinx-autobuild "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/_static/css/aiven.css b/_static/css/aiven.css index 93a0dce362..d7f16120d7 100644 --- a/_static/css/aiven.css +++ b/_static/css/aiven.css @@ -20,26 +20,35 @@ src: url(../fonts/Inter-Medium.ttf); } -.sidebar-drawer { - border-right: none; +@font-face { + font-family: Poppins; + src: url(../fonts/Poppins-SemiBold.ttf); } h1, h2, h3 { color: var(--color-foreground-primary); - font-family: InterBold; + font-family: "Poppins",sans-serif; } p { color: var(--color-content-foreground); - font-family: Inter; + font-family: "Inter",sans-serif; } a { color: var(--color-link); - text-decoration-color: var(--color-link); - font-family: Inter; + font-family: "Inter",sans-serif; + text-decoration-color: var(--color-link--hover); +} + +a:hover { + color: var(--color-link--hover); +} + +a.muted-link { + text-decoration: underline; } .page { @@ -107,16 +116,6 @@ a { width: 140px; } -.topnavbar-button-primary { - color: var(--color-topnav-button-primary); - background: var(--color-topnav-button-primary-background); -} - -.topnavbar-button-primary:hover { - color: var(--color-topnav-button-primary-hover); - background: var(--color-topnav-button-primary-hover-background); -} - .topnavbar-button-secondary { color: var(--color-topnav-button-secondary); border-color: var(--color-topnav-button-secondary-border); @@ -133,22 +132,50 @@ a { width: 100%; max-width: max-content; padding: 8px 24px; - border: 1px solid; + border: 1px solid var(--color-topnav-button-secondary-border); border-radius: 2px; cursor: pointer; text-decoration: none !important; + font-weight: 600; } .topnavbar-button-large:hover { text-decoration: none !important; } 
+.sd-btn.sd-btn-primary, +.sd-btn.sd-btn-outline-primary { + border-radius: 2px; + border-color: var(--color-topnav-button-secondary-border) !important; +} + +.sd-btn.sd-btn-primary:hover, +.sd-btn.sd-btn-outline-primary:hover, +.sd-btn.sd-btn-outline-primary:focus { + color: var(--color-topnav-button-secondary) !important; + border-color: var(--color-topnav-button-secondary-hover-border) !important; + background-color: transparent !important; +} + +.topnavbar-button-primary { + color: var(--color-topnav-button-primary); + background: var(--color-topnav-button-primary-background); + border: 1px solid var(--color-topnav-button-primary-background); +} + +.topnavbar-button-primary:hover { + color: var(--color-topnav-button-primary-hover); + background: var(--color-topnav-button-primary-hover-background); + border: 1px solid var(--color-topnav-button-primary-hover-background); +} + .topnavbar-link { color: var(--color-topnav-link); text-decoration: none; } .topnavbar-link:hover { + color: var(--color-topnav-link); text-decoration: underline; text-decoration-color: var(--color-topnav-link); } @@ -193,10 +220,6 @@ a { top: var(--topnavbar-height); } -.sidebar-tree .current-page > .reference { - font-weight: 400; -} - .sidebar-tree label:hover { background: none; } @@ -213,25 +236,25 @@ a { } .sidebar-tree .reference:hover { - background: none; - border-left: 1px solid var(--color-link); + border-left: 1px solid var(--color-brand-primary); font-family: Inter; - color: var(--color-link); + color: var(--color-sidebar-link-text); + font-weight: 600; } -.sidebar-tree .current-page > .reference { +.sidebar-tree .current > .reference { background: none !important; - border-left: 1px solid var(--color-link); - color: var(--color-link) !important; - font-weight: 400 !important; } -.sidebar-tree .current > .reference { - background: none !important; +.sidebar-tree .current-page > .reference { + font-weight: 600; + background: var(--color-sidebar-background-hover) !important; + 
border-left: 1px solid var(--color-brand-primary); + color: var(--color-sidebar-link-text) !important; } .sidebar-tree .current-page > .reference:hover { - background: none !important; + background: var(--color-sidebar-background-hover); } .sidebar-tree > .reference, @@ -271,7 +294,11 @@ a { .sidebar-search:focus { color: var(--color-search-focused); border-color: var(--color-search-border-focused); - outline: 2px solid var(--color-search-container-outline-focused); + outline: none; +} + +.sidebar-search::placeholder { + color: var(--color-search-focused); } .sidebar-search-container::before { @@ -353,17 +380,6 @@ h3 > code > .pre { top: var(--topnavbar-height) !important; } -.toctree-wrapper .reference { - font-family: Inter; - color: var(--color-sidebar-link-text); - text-decoration-color: var(--color-sidebar-link-text); -} - -.toctree-wrapper .reference:hover { - color: var(--color-link); - text-decoration-color: var(--color-link); -} - .toctree-wrapper > ul > li.toctree-l1 { list-style: none !important; margin: 12px; diff --git a/_static/fonts/Poppins-SemiBold.ttf b/_static/fonts/Poppins-SemiBold.ttf new file mode 100644 index 0000000000..74c726e327 Binary files /dev/null and b/_static/fonts/Poppins-SemiBold.ttf differ diff --git a/_toc.yml b/_toc.yml index 0abac11066..7a9135def9 100644 --- a/_toc.yml +++ b/_toc.yml @@ -44,6 +44,7 @@ entries: - file: docs/platform/howto title: HowTo entries: + - file: docs/platform/howto/feature-preview - file: docs/platform/howto/list-user title: User and access management entries: @@ -51,6 +52,7 @@ entries: - file: docs/platform/howto/change-your-email-address - file: docs/platform/howto/add-authentication-method - file: docs/platform/howto/create_authentication_token + - file: docs/platform/howto/manage-org-users - file: docs/platform/howto/create_new_service_user - file: docs/tools/aiven-console/howto/create-manage-teams - file: docs/platform/howto/user-2fa @@ -116,12 +118,18 @@ entries: - file: 
docs/platform/howto/billing-assign-projects - file: docs/platform/howto/payment-issues-plan-upgrades - file: docs/platform/howto/custom-plans + - file: docs/platform/howto/billing-aws-marketplace-subscription + title: Set up AWS Marketplace + - file: docs/platform/howto/move-to-aws-marketplace-billing + title: Move to AWS Marketplace - file: docs/platform/howto/billing-google-cloud-platform-marketplace-subscription title: Set up Google Cloud Marketplace - file: docs/platform/howto/move-to-gcp-marketplace-billing title: Move to Google Cloud Marketplace - file: docs/platform/howto/billing-azure-marketplace-subscription title: Set up Azure Marketplace + - file: docs/platform/howto/move-to-azure-marketplace-billing + title: Move to Azure Marketplace - file: docs/platform/howto/list-saml title: SAML Authentication entries: @@ -299,6 +307,9 @@ entries: - file: docs/products/kafka/concepts/auth-types - file: docs/products/kafka/concepts/non-leader-for-partition - file: docs/products/kafka/concepts/configuration-backup + - file: docs/products/kafka/concepts/monitor-consumer-group + - file: docs/products/kafka/concepts/kafka-quotas + title: Quotas - file: docs/products/kafka/howto title: HowTo entries: @@ -350,6 +361,7 @@ entries: - file: docs/products/kafka/howto/prevent-full-disks - file: docs/products/kafka/howto/use-zookeeper - file: docs/products/kafka/howto/avoid-out-of-memory-error + - file: docs/products/kafka/howto/manage-quotas - file: docs/products/kafka/howto/list-integration title: Integrations @@ -1072,6 +1084,8 @@ entries: title: TimescaleDB - file: docs/products/postgresql/concepts/upgrade-failover title: Upgrade and failover procedures + - file: docs/products/postgresql/concepts/pgvector + title: AI-powered search with pgvector - file: docs/products/postgresql/howto title: HowTo entries: @@ -1112,6 +1126,8 @@ entries: - file: docs/products/postgresql/howto/optimize-pg-slow-queries - file: docs/products/postgresql/howto/check-avoid-transaction-id-wraparound 
- file: docs/products/postgresql/howto/prevent-full-disk + - file: docs/products/postgresql/howto/use-pgvector + title: Enable and use pgvector - file: docs/products/postgresql/howto/list-replication-migration title: Migrate entries: diff --git a/conf.py b/conf.py index 61c71c1c72..dc4be3bf20 100644 --- a/conf.py +++ b/conf.py @@ -20,7 +20,7 @@ # -- Project information ----------------------------------------------------- project = 'Aiven Docs' -copyright = '2022, Aiven Team' +copyright = '2023, Aiven Team' author = 'Aiven Team' html_title = 'Aiven' @@ -96,84 +96,99 @@ html_favicon = './_static/images/favicon.ico' html_theme = 'furo' html_theme_options = { - "light_logo": "images/logoLightPride.png", - "dark_logo": "images/logoDarkPride.png", + "light_logo": "images/logoLight.png", + "dark_logo": "images/logoDark.png", "light_css_variables": { - "color-brand-primary": "#c60443", - "color-brand-content": "#4a4b57", - "color-link": "#c60443", - "color-sidebar-link-text": "#4a4b57", - "color-sidebar-link-text--top-level": "#4a4b57", + "color-brand-primary": "#FF5200", + "color-brand-content": "#343745", + "color-link": "#007cc2", + "color-link--hover": "#016BB2", + "color-link-underline--hover": "#016BB2", + "color-sidebar-link-text": "#343745", + "color-sidebar-link-text--top-level": "#343745", "font-stack": "Inter, sans-serif", - "color-sidebar-brand-text": "#4a4b57", + "color-sidebar-brand-text": "#343745", + "color-sidebar-background-hover": "#F8F9FB", + "color-sidebar-item-background--hover": "#F8F9FB", "color-foreground-primary": "#333333", - "color-foreground-secondary": "#747481", - "color-foreground-muted": "#747481", - "color-foreground-border": "#e1e1e3", + "color-foreground-secondary": "#4f5366", + "color-foreground-muted": "#4f5366", + "color-foreground-border": "#EDEDF0", "color-background-primary": "#ffffff", "color-background-secondary": "#f7f7fa", - "color-content-foreground": "#747481", + "color-content-foreground": "#4f5366", 
"color-background-hover": "#c60443", - "color-background-border": "#e1e1e3", + "color-background-border": "#EDEDF0", "color-highlighted-background": "#1c1c2f", - "color-inline-code-background": "#747481", - "color-sidebar-background": "#f7f7fa", - "color-sidebar-background-border": "#e1e1e3", + "color-inline-code-background": "#4f5366", + "color-sidebar-background": "#FFFFFF", + "color-sidebar-background-border": "#EDEDF0", "color-sidebar-search-background": "#fff", - "sd-color-card-background": "#f7f7fa", - "sd-color-primary": "#4a4b57", + "color-card-border": "#E1E1E3", + "sd-color-card-background": "#fff", + "sd-color-primary": "#343745", + "sd-color-shadow": "none", "sidebar-tree-space-above": "8px", # Custom css variables "color-search": "#19191D", "color-search-focused": "#4A4B57", "color-search-border": "#B4B4BB", - "color-search-border-focused": "#0174BA", + "color-search-border-focused": "#4F5366", "color-search-container-outline-focused": "#B4E5FB", "color-search-background": "#FFFFFF", + "color-sidebar-search-icon": "#B4B4BB", "color-topnav-background": "#FFFFFF", "color-topnav-border": "#EDEEF3", - "color-topnav-link": "#E41A4A", + "color-topnav-link": "#1A1B22", "color-topnav-theme-toggle-border": "rgba(0, 0, 0, 0.1)", "color-topnav-button-primary": "#FFFFFF", "color-topnav-button-primary-hover": "#FFFFFF", - "color-topnav-button-primary-background": "#E41A4A", - "color-topnav-button-primary-hover-background": "#C60443", - "color-topnav-button-secondary": "#E41A4A", - "color-topnav-button-secondary-border": "#E41A4A", - "color-topnav-button-secondary-hover": "#E41A4A", - "color-topnav-button-secondary-hover-border": "#E41A4A", - "color-topnav-button-secondary-hover-background": "#FFF9FC" + "color-topnav-button-primary-background": "#FF5200", + "color-topnav-button-primary-hover-background": "#F04D00", + "color-topnav-button-secondary": "#1A1B22", + "color-topnav-button-secondary-border": "#B4B7C5", + "color-topnav-button-secondary-hover": "#1A1B22", + 
"color-topnav-button-secondary-hover-border": "#4F5366", + "color-topnav-button-secondary-hover-background": "transparent", + "color-toc-item-text--active": "#007cc2", + "color-highlight-on-target": "#F8F9FB", }, "dark_css_variables": { - "color-brand-primary": "#d2d2d6", + "color-brand-primary": "#FF5200", "color-brand-content": "#ffffff", - "color-link": "#d2d2d6", + "color-link": "#6DCDF8", + "color-link--hover": "#3DBBF5", + "color-link-underline--hover": "#3DBBF5", "font-stack": "Inter, sans-serif", - "color-sidebar-brand-text": "#d2d2d6", - "color-sidebar-link-text": "#d2d2d6", - "color-sidebar-link-text--top-level": "#d2d2d6", + "color-sidebar-brand-text": "#D2D2D6", + "color-sidebar-background-hover": "#161825", + "color-sidebar-item-background--hover": "#161825", + "color-sidebar-link-text": "#D2D2D6", + "color-sidebar-link-text--top-level": "#D2D2D6", "color-foreground-primary": "#ffffff", - "color-foreground-secondary": "#83839d", - "color-foreground-muted": "#747481", - "color-foreground-border": "#e1e1e3", + "color-foreground-secondary": "#D2D2D6", + "color-foreground-muted": "#D2D2D6", + "color-foreground-border": "#3A3A44", "color-background-primary": "#11111e", "color-background-secondary": "#1c1c2f", - "color-content-foreground": "#83839d", + "color-content-foreground": "#D2D2D6", "color-background-hover": "#ff3554", - "color-background-border": "#e1e1e3", + "color-background-border": "#3A3A44", "color-highlighted-background": "#1c1c2f", "color-inline-code-background": "#f7f7fa", - "color-sidebar-background": "#0b0b14", - "color-sidebar-background-border": "#e1e1e3", + "color-sidebar-background": "#11111E", + "color-sidebar-background-border": "#3A3A44", "color-sidebar-search-background": "#1c1c2f", "color-admonition-title-background--tip": "#00c85240", "color-admonition-title-background--note": "#00b0ff40", "color-admonition-title-background--warning": "#ff910040", "color-admonition-title-background--error": "#ff525240", - 
"sd-color-card-background": "#0b0b14", + "sd-color-card-background": "#161825", "sd-color-card-header": "#0b0b14", - "sd-color-primary": "#e1e1e3", + "sd-color-primary": "#ffffff", + "sd-color-shadow": "none", + "color-card-border": "#3A3A44", "sidebar-tree-space-above": "8px", # Custom css variables @@ -183,19 +198,22 @@ "color-search-border-focused": "#7FD1F7", "color-search-container-outline-focused": "#0174BA", "color-search-background": "#11111E", + "color-sidebar-search-icon": "#F7F7FA", "color-topnav-background": "#0B0B14", "color-topnav-border": "#3A3A44", "color-topnav-link": "#F7F7FA", "color-topnav-theme-toggle-border": "rgba(255, 255, 255, 0.1)", - "color-topnav-button-primary": "black", - "color-topnav-button-primary-hover": "black", - "color-topnav-button-primary-background": "#ffffff", - "color-topnav-button-primary-hover-background": "#EDEDF0", + "color-topnav-button-primary": "#FFFFFF", + "color-topnav-button-primary-hover": "#FFFFFF", + "color-topnav-button-primary-background": "#FF5200", + "color-topnav-button-primary-hover-background": "#F04D00", "color-topnav-button-secondary": "#f7f7fa", "color-topnav-button-secondary-border": "#f7f7fa", "color-topnav-button-secondary-hover": "#f7f7fa", "color-topnav-button-secondary-hover-border": "#f7f7fa", - "color-topnav-button-secondary-hover-background": "rgba(255, 255, 255, 0.1)" + "color-topnav-button-secondary-hover-background": "transparent", + "color-toc-item-text--active": "#6DCDF8", + "color-highlight-on-target": "#161825", }, "navigation_with_keys": True } diff --git a/docs/platform/concepts/service_backups.rst b/docs/platform/concepts/service_backups.rst index e9a60e3707..5721806a8a 100644 --- a/docs/platform/concepts/service_backups.rst +++ b/docs/platform/concepts/service_backups.rst @@ -6,7 +6,12 @@ This article provides information on general rules for handling service backups About backups at Aiven ---------------------- -All Aiven services, except for Apache Kafka® and M3 
Aggregator/Coordinator, have time-based backups that are encrypted and securely stored. The backup retention times vary based on the service and the selected service plan. +All Aiven services, except for Apache Kafka® and M3 Aggregator/Coordinator, have time-based backups that are encrypted and securely stored. Backups at Aiven are stored in the object storage of the cloud region where a service runs (for example, S3 for AWS or GCS for GCP). You can check the location of your service's backups in `Aiven Console `_ > your service's homepage > **Backups** tab. + +.. image:: /images/platform/concepts/backup_location_preview.png + :alt: Backup location preview on console + +The backup retention times vary based on the service and the selected service plan. Aiven takes service backups for managing purposes. These backups are compressed and encrypted by the Aiven management platform and, as such, are not available for download for any service type. diff --git a/docs/platform/howto/billing-aws-marketplace-subscription.rst b/docs/platform/howto/billing-aws-marketplace-subscription.rst new file mode 100644 index 0000000000..5cd7478ee2 --- /dev/null +++ b/docs/platform/howto/billing-aws-marketplace-subscription.rst @@ -0,0 +1,33 @@ +Set up AWS Marketplace for Aiven services +=========================================== + +Aiven makes its services available through the Amazon AWS Marketplace. This article shows the steps needed to create a subscription that links the accounts. + +First, there are some steps that need to be completed on the AWS Marketplace page before heading over to the Aiven Console and finishing the process. + +AWS Marketplace setup +--------------------- + +1. Search for "Aiven Managed Database Services" on the `AWS Marketplace `_. This page contains information about all of Aiven's services and how the marketplace subscription works. Click the **View purchase options** button on this page. + +.. 
image:: /images/platform/howto/aws-marketplace-listing.png + :alt: AWS Marketplace purchase options button for Aiven Managed Database Services + +2. When you are ready, click the **Subscribe** button on the page. You will NOT be charged by clicking this button; this only sets up a billing subscription between AWS and Aiven. You will only be charged after deploying Aiven services. + +3. Click **Set up your account**. This takes you to the Aiven Console to complete the process. + +Aiven account setup +------------------- + +4. You should now be on the AWS signup page at Aiven, asking you to sign up or log in. + +5. After registering or logging in, choose or create an Aiven organization to use the AWS subscription for. If you have any existing Aiven projects that you want to be moved to this AWS subscription, this organization name is the one you will need for that. + +If you have any issues linking Aiven to your AWS subscription, you can try the process again in the AWS web console by finding the Aiven subscription and clicking **Set up your account**. + + +.. note:: + The URL that you log in to for your AWS subscription is https://console.aws.aiven.io. This is different from the Aiven Console (https://console.aiven.io). + + diff --git a/docs/platform/howto/billing-azure-marketplace-subscription.rst b/docs/platform/howto/billing-azure-marketplace-subscription.rst index b57baae037..ad975be5f9 100644 --- a/docs/platform/howto/billing-azure-marketplace-subscription.rst +++ b/docs/platform/howto/billing-azure-marketplace-subscription.rst @@ -3,41 +3,38 @@ Set up Azure Marketplace for Aiven services Aiven makes its services available through the Microsoft Azure Marketplace. This article shows the steps needed to create a subscription that links the accounts. -First, there are some steps that need to be completed on the Azure Marketplace page before heading over to the Aiven console and finishing the process. 
+First, there are some steps that need to be completed on the Azure Marketplace page before heading over to the Aiven Console and finishing the process. Azure Marketplace setup ----------------------- -1. Search for "Aiven Managed Database Services" on the `Azure Marketplace `_. This page contains information about all of Aiven's services and how the marketplace subscription works. Click the **Subscribe** button on this page. +1. Search for "Aiven Managed Database Services" on the `Azure Marketplace `_. This page contains information about all of Aiven's services and how the marketplace subscription works. Click the **Subscribe** button on this page. .. image:: /images/platform/howto/azure-marketplace-listing.png :alt: Azure Marketplace listing tile for Aiven Managed Database Services :height: 342px -2. Select your desired Azure subscription resource group to organise your resources, give the subscription a name, and make sure that "Recurring billing" is turned on. There is only one plan available because all of the costs are managed by Aiven based on what you use during the month. +2. Select your desired Azure subscription resource group to organise your resources, give the subscription a name, and make sure that "Recurring billing" is turned on. There is only one plan available because all of the costs are managed by Aiven based on what you use during the month. 3. Progress to the "Review + subscribe" screen, then read and agree to the terms of use. -4. When you are ready, click the **Subscribe** button at the bottom of the page. You will NOT be charged by clicking this button; this only sets up a billing subscription between Azure and Aiven. You will only be charged after deploying Aiven services. +4. When you are ready, click the **Subscribe** button at the bottom of the page. You will NOT be charged by clicking this button; this only sets up a billing subscription between Azure and Aiven. You will only be charged after deploying Aiven services. -5. 
You should now see a message that says "Your SaaS subscription is in progress". This takes a few minutes to complete before you can progress. +5. You should now see a message that says "Your SaaS subscription is in progress". This takes a few minutes to complete before you can progress. 6. When you see the message "Thank you for your order. Configure the SaaS service to complete the purchase", click the "Configure account now" button to head over to the Aiven website to complete the process. Aiven account setup ------------------- -7. You should now be on the `Azure signup page at Aiven `_, asking you for your email address to log in to the account. This should be the same email as you use on the Azure console. +7. You should now be on the `Azure signup page at Aiven `_, asking you for your email address to log in to the account. This should be the same email as you use on the Azure console. -8. After entering your email address, you will be authenticated via Azure single sign-on and then returned to the Aiven console. +8. After entering your email address, you will be authenticated via Azure single sign-on and then returned to the Aiven Console. 9. You will be sent an email to "Activate your new subscription" - click on the **Activate now** link to join your Aiven account to your Azure account. -10. You are now ready to create your first project and deploy services. - .. note:: - Note the URL is https://console.azure.aiven.io - this uses a different account system than https://console.aiven.io. When coming back to Aiven in the future, you will need to use https://console.azure.aiven.io to login, and authenticate using Azure OAuth. + The URL is https://console.azure.aiven.io - this is different from the Aiven Console (https://console.aiven.io). -.. note:: - When you view the Aiven subscription on the Azure SaaS resource list, you will see a link to **Open SaaS Account on publisher's site**. 
You can use this link to complete the subscription process if anything goes wrong during the steps listed here. +When you view the Aiven subscription on the Azure SaaS resource list, you will see a link to **Open SaaS Account on publisher's site**. You can use this link to complete the subscription process if anything goes wrong during the steps listed here. diff --git a/docs/platform/howto/feature-preview.rst b/docs/platform/howto/feature-preview.rst new file mode 100644 index 0000000000..d628558569 --- /dev/null +++ b/docs/platform/howto/feature-preview.rst @@ -0,0 +1,15 @@ +Feature previews +================= + +Before an official release, some features are available to our customers for testing. These feature previews let you try out upcoming enhancements and give our product teams feedback to help improve them. + +Enable a feature preview +------------------------- + +To try upcoming features before they are released: + +#. Click the **User information** icon in the top right and select **Feature preview**. + +#. On the **Feature preview** tab, click **Enable** for any of the features you want to test. + +After enabling a feature preview and testing it, you can provide feedback from this page by clicking **Give feedback**. \ No newline at end of file diff --git a/docs/platform/howto/manage-org-users.rst b/docs/platform/howto/manage-org-users.rst new file mode 100644 index 0000000000..88805484a2 --- /dev/null +++ b/docs/platform/howto/manage-org-users.rst @@ -0,0 +1,59 @@ +Manage users in an organization +================================ + +.. important:: + Organization users are available as a feature preview and must be :doc:`enabled in the user profile `. + +Adding users to your organization lets you give them access to specific organizational units, projects, and services within that organization. + +Invite users to an organization +--------------------------------- + +To add users to your organization, send them an invite: + +#. Click **Admin**. + +#. 
Click **Users**. + +#. Click **Invite users**. + +#. Enter the email addresses of the people you want to invite. + +#. Click **Invite users**. + +The users receive an email with instructions to sign up (for new users) and accept the invite. + + +Remove users from an organization +---------------------------------- + +If you remove a user from an organization, they will also be removed from all teams and projects and no longer have access to any resources in the organization. + +To remove a user from an organization: + +#. Click **Admin**. + +#. Click **Users**. + +#. Find the user that you want to remove and click the **Actions** menu. + +#. Select **Remove**. + +#. Confirm you want to remove the user by clicking **Remove user**. + + +Resend an invite +----------------- + +If you need to resend an invite to a user: + +#. Click **Admin**. + +#. Click **Users**. + +#. Find the email address that you want to resend an invite to and click the **Actions** menu. + +#. Select **Resend invite**. + +They get a new email with instructions for signing up or accepting the invite. + diff --git a/docs/platform/howto/manage-vpc-peering.rst b/docs/platform/howto/manage-vpc-peering.rst index 02cd8480fa..0998687928 100644 --- a/docs/platform/howto/manage-vpc-peering.rst +++ b/docs/platform/howto/manage-vpc-peering.rst @@ -75,4 +75,17 @@ You can enable public internet access for your services by following the :doc:`E IP filtering (the Allowed IP Addresses list on the service overview page) is still available for a service deployed to a VPC where both public and private access are allowed. We recommend that you use IP filtering when your VPC service is also exposed to the public internet. -Also note that safelisting applies to both internal and external traffic. If you safelist an external IP address and want to keep traffic flowing with the internal (peered) connections, make sure that you safelist the CIDR blocks of the peered networks as well to avoid disruptions to the service. 
\ No newline at end of file +Also note that safelisting applies to both internal and external traffic. If you safelist an external IP address and want to keep traffic flowing with the internal (peered) connections, make sure that you safelist the CIDR blocks of the peered networks as well to avoid disruptions to the service. + +Troubleshoot VPC connection issues +------------------------------------- + +Any network changes to VPC peered hosts external from Aiven can cause issues with routing to your Aiven services hosted in a VPC. To troubleshoot such issues, take the following steps: + +1. In `Aiven Console `_, select **VPC**. +2. Find the ID of the affected VPC and select it from the **Internal ID** column. +3. Select **Refresh VPC connections**. + +As a result, the platform checks the VPC peering connection and rebuilds the peering connection state if there are any changes detected. + +For any other issues, open a support ticket from Aiven Console to get in touch with the support team and/or see :doc:`Get support in the Aiven Console `. \ No newline at end of file diff --git a/docs/platform/howto/move-to-aws-marketplace-billing.rst b/docs/platform/howto/move-to-aws-marketplace-billing.rst new file mode 100644 index 0000000000..78ff1fcd5a --- /dev/null +++ b/docs/platform/howto/move-to-aws-marketplace-billing.rst @@ -0,0 +1,28 @@ +Move from Aiven direct billing to AWS Marketplace +================================================= + +Aiven makes its services available through the AWS Marketplace. If you already have some services running in a project which is billed directly through Aiven but you would like to move to an AWS Marketplace subscription without disrupting your services, this article shows the steps needed to gather the relevant information and submit the request. + +Set up an AWS Marketplace subscription for Aiven +---------------------------------------------------------- + +Follow the steps to :doc:`set up AWS Marketplace for Aiven services `. 
This will create a new Aiven organization, which is where your projects will be moved to. + +Gather the required information +------------------------------- + +Aiven will need some information from both your existing user account and your new subscription in order to perform the migration. + +**From your existing Aiven user account:** + +* The name of the Aiven projects that have the services you wish to move. + +**From your new Aiven organization with the AWS marketplace subscription:** + +* The name of the new organization. + +Send the request to Aiven +------------------------- + +Once you have collected the information above, send it by email to `sales@Aiven.io `_ and someone will be in touch to complete the process. + diff --git a/docs/platform/howto/move-to-azure-marketplace-billing.rst b/docs/platform/howto/move-to-azure-marketplace-billing.rst new file mode 100644 index 0000000000..dc9a60865e --- /dev/null +++ b/docs/platform/howto/move-to-azure-marketplace-billing.rst @@ -0,0 +1,28 @@ +Move from Aiven direct billing to Azure Marketplace +=================================================== + +Aiven makes its services available through the Azure Marketplace. If you already have some services running in a project which is billed directly through Aiven but you would like to move to an Azure Marketplace subscription without disrupting your services, this article shows the steps needed to gather the relevant information and submit the request. + +Set up an Azure Marketplace subscription for Aiven +-------------------------------------------------- + +Follow the steps to :doc:`set up Azure Marketplace for Aiven services `. This will create a new Aiven organization, which is where your projects will be moved to. + +Gather the required information +------------------------------- + +Aiven will need some information from both your existing user account and your new subscription in order to perform the migration. 
+ +**From your existing Aiven user account:** + +* The name of the Aiven projects that have the services you wish to move. + +**From your new Aiven organization with the Azure marketplace subscription:** + +* The name of the new organization. + +Send the request to Aiven +------------------------- + +Once you have collected the information above, send it by email to `sales@Aiven.io `_ and someone will be in touch to complete the process. + diff --git a/docs/platform/howto/move-to-gcp-marketplace-billing.rst b/docs/platform/howto/move-to-gcp-marketplace-billing.rst index fdbb0268a7..345ab51ab5 100644 --- a/docs/platform/howto/move-to-gcp-marketplace-billing.rst +++ b/docs/platform/howto/move-to-gcp-marketplace-billing.rst @@ -19,7 +19,7 @@ Aiven will need some information from both your existing and new subscriptions i **From your new Aiven organization with the GCP marketplace subscription:** -* Your new Aiven organization name, as shown at the top of the `Aiven GCP console `_. +* Your new Aiven organization name, as shown in the `Aiven GCP console `_. Send the request to Aiven ------------------------- diff --git a/docs/platform/howto/use-google-private-service-connect.rst b/docs/platform/howto/use-google-private-service-connect.rst index 246691dae3..92da446349 100644 --- a/docs/platform/howto/use-google-private-service-connect.rst +++ b/docs/platform/howto/use-google-private-service-connect.rst @@ -111,7 +111,7 @@ To approve the connection, run the following approval command: .. code:: shell - avn privatelink google connection approve MY_SERVICE_NAME --privatelink-connection-id PRIVATELINK_CONNECTION_ID --user-ip-address PSC_ENDPOINT_IP_ADDRESS + avn service privatelink google connection approve MY_SERVICE_NAME --privatelink-connection-id PRIVATELINK_CONNECTION_ID --user-ip-address PSC_ENDPOINT_IP_ADDRESS As a result, the connection initially transitions to the user-approved state. 
diff --git a/docs/products/clickhouse/howto/integrate-kafka.rst b/docs/products/clickhouse/howto/integrate-kafka.rst index aba8214490..3f6ae6eb36 100644 --- a/docs/products/clickhouse/howto/integrate-kafka.rst +++ b/docs/products/clickhouse/howto/integrate-kafka.rst @@ -55,8 +55,14 @@ The newly created database name has the following format: `service_KAFKA_SERVICE Update Apache Kafka integration settings ----------------------------------------- -Next step is to configure the topic and data format options for the integration. This will create a virtual table in Aiven for ClickHouse that can receive and send messages from multiple topics. You can have as many of such tables as you need. You need to define for each table following: +Next step is to configure the topic and data format options for the integration. This will create a virtual table in Aiven for ClickHouse that can receive and send messages from multiple topics. You can have as many such tables as you need. +For each table, there are mandatory and optional settings to be defined. + +Mandatory settings +'''''''''''''''''' + +For each table, you need to define the following: * ``name`` - name of the connector table * ``columns`` - array of columns, with names and types @@ -64,24 +70,108 @@ Next step is to configure the topic and data format options for the integration. * ``data_format`` - your preferred format for data input, see :doc:`../reference/supported-input-output-formats` * ``group_name`` - consumer group name, that will be created on your behalf -Integration settings in a JSON format: - -.. code:: json - - { - "tables": [ - { - "name": "CONNECTOR_TABLE_NAME", - "columns": [ - {"name": "id", "type": "UInt64"}, - {"name": "name", "type": "String"} - ], - "topics": [{"name": "topic1"}, {"name": "topic2"}], - "data_format": "DATA_FORMAT", - "group_name": "CONSUMER_NAME" - } - ] - } +.. topic:: JSON format + + ..
code-block:: json + + { + "tables": [ + { + "name": "CONNECTOR_TABLE_NAME", + "columns": [ + {"name": "id", "type": "UInt64"}, + {"name": "name", "type": "String"} + ], + "topics": [{"name": "topic1"}, {"name": "topic2"}], + "data_format": "DATA_FORMAT", + "group_name": "CONSUMER_NAME" + } + ] + } + +Optional settings +''''''''''''''''' + +For each table, you can define the following optional settings: + +.. list-table:: + :widths: 10 30 5 5 5 5 + :header-rows: 1 + + * - Name + - Description + - Default value + - Allowed values + - Minimum value + - Maximum value + * - ``auto_offset_reset`` + - Action to take when there is no initial offset in the offset store or the desired offset is out of range + - ``earliest`` + - ``smallest``, ``earliest``, ``beginning``, ``largest``, ``latest``, ``end`` + - -- + - -- + * - ``date_time_input_format`` + - Method to read ``DateTime`` from text input formats + - ``basic`` + - ``basic``, ``best_effort``, ``best_effort_us`` + - -- + - -- + * - ``handle_error_mode`` + - Method to handle errors for the Kafka engine + - ``default`` + - ``default``, ``stream`` + - -- + - -- + * - ``max_block_size`` + - Number of rows collected by poll(s) for flushing data from Kafka + - ``0`` + - ``0`` - ``1_000_000_000`` + - ``0`` + - ``1_000_000_000`` + * - ``max_rows_per_message`` + - Maximum number of rows produced in one Kafka message for row-based formats + - ``1`` + - ``1`` - ``1_000_000_000`` + - ``1`` + - ``1_000_000_000`` + * - ``num_consumers`` + - Number of consumers per table per replica + - ``1`` + - ``1`` - ``10`` + - ``1`` + - ``10`` + * - ``poll_max_batch_size`` + - Maximum amount of messages to be polled in a single Kafka poll + - ``0`` + - ``0`` - ``1_000_000_000`` + - ``0`` + - ``1_000_000_000`` + * - ``skip_broken_messages`` + - Minimum number of broken messages from Kafka topic per block to be skipped + - ``0`` + - ``0`` - ``1_000_000_000`` + - ``0`` + - ``1_000_000_000`` + +.. topic:: JSON format + + .. 
code-block:: json + + { + "tables": [ + { + "name": "CONNECTOR_TABLE_NAME", + "columns": [ + {"name": "id", "type": "UInt64"}, + {"name": "name", "type": "String"} + ], + "topics": [{"name": "topic1"}, {"name": "topic2"}], + "data_format": "DATA_FORMAT", + "group_name": "CONSUMER_NAME", + "auto_offset_reset": "earliest" + } + ] + } Configure integration with CLI -------------------------------- @@ -174,21 +264,6 @@ You can also bring the entries from ClickHouse table into the Apache Kafka topic .. _reference: Reference ----------- - -When connecting ClickHouse® to Kafka® using Aiven integrations, data exchange is possible with the following formats only: - -============================ ==================================================================================== -Format Example -============================ ==================================================================================== -CSV ``123,"Hello"`` -JSONASString ``{"x":123,"y":"hello"}`` -JSONCompactEachRow ``[123,"Hello"]`` -JSONCompactStringsEachRow ``["123","Hello"]`` -JSONEachRow ``{"x":123,"y":"hello"}`` -JSONStringsEachRow ``{"x":"123","y":"hello"}`` -MsgPack ``{\xc4\x05hello`` -TSKV ``x=123\ty=hello`` -TSV ``123\thello`` -TabSeparated ``123\thello`` -============================ ==================================================================================== +--------- + +When connecting ClickHouse® to Kafka® using Aiven integrations, data exchange requires using specific formats. Check the supported formats for input and output data in :doc:`Formats for ClickHouse®-Kafka® data exchange `. 
diff --git a/docs/products/clickhouse/reference/supported-input-output-formats.rst b/docs/products/clickhouse/reference/supported-input-output-formats.rst index f1ff59ebde..a1d4d563d0 100644 --- a/docs/products/clickhouse/reference/supported-input-output-formats.rst +++ b/docs/products/clickhouse/reference/supported-input-output-formats.rst @@ -6,9 +6,12 @@ When connecting ClickHouse® to Kafka® using Aiven integrations, data exchange ============================ ==================================================================================== Format name Notes ============================ ==================================================================================== -Avro Only supports binary Avro format with embedded schema. +Avro Binary Avro format with embedded schema. Libraries and documentation: https://avro.apache.org/ +AvroConfluent Binary Avro with schema registry. + + Requires the Karapace Schema Registry to be enabled in the Kafka service. CSV Example: ``123,"Hello"`` JSONASString Example: ``{"x":123,"y":"hello"}`` JSONCompactEachRow Example: ``[123,"Hello"]`` diff --git a/docs/products/kafka/concepts/kafka-quotas.rst b/docs/products/kafka/concepts/kafka-quotas.rst new file mode 100644 index 0000000000..d4c605e0f2 --- /dev/null +++ b/docs/products/kafka/concepts/kafka-quotas.rst @@ -0,0 +1,45 @@ +Quotas in Aiven for Apache Kafka® +==================================== + +Quotas ensure fair resource allocation, stability, and efficiency in your Kafka cluster. In Aiven for Apache Kafka®, you can :doc:`add quotas <../howto/manage-quotas>` to limit the data or requests exchanged by producers and consumers within a specific period, preventing issues like broker overload, network congestion, and service disruptions caused by excessive or malicious traffic. You can effectively manage resource consumption and ensure optimal user performance by implementing quotas. You can add and manage quotas using `Aiven Console `_ and `Aiven API `_. 
+ +Using quotas offers several benefits: + +* **Resource management:** Quotas prevent individual clients from consuming excessive resources, thus ensuring fairness in resource allocation. +* **Stability:** Setting limits on network throughput and CPU usage helps maintain stability and prevent performance degradation of the Apache Kafka cluster. +* **Efficiency:** Quotas enable you to optimize resource utilization and achieve better overall efficiency within your Kafka deployment. + + + +Supported quota types +----------------------- + +Aiven for Apache Kafka provides different quotas to help you manage resources effectively. These quotas offer benefits in controlling network bandwidth and CPU usage: + +* **Consumer throttle (Network bandwidth quota):** This quota allows you to limit the amount of data a consumer can retrieve from the Kafka cluster per second. Setting a maximum network throughput prevents any single consumer from using excessive network bandwidth. +* **Producer throttle (Network bandwidth quota):** Similar to the consumer throttle, this quota limits the amount of data a producer can send to the Kafka cluster per second. It ensures that producers do not overload the system by sending excessive data, thereby maintaining system stability. +* **CPU throttle:** This quota is about managing CPU usage. You can manage CPU usage by setting a percentage of the total CPU time. Limiting the CPU resources for specific client IDs or users prevents any individual from monopolizing CPU resources, promoting fairness and efficient resource utilization. + + +Client ID and users in quotas +-------------------------------- +**Client ID** and **User** are two types of entities that can be used to enforce quotas in Kafka. + +**Client ID** + A Client ID is a unique identifier assigned to each client application or producer/consumer instance that connects to a Kafka cluster. It helps track the activity and resource usage of individual clients.
When configuring quotas, you can set limits based on the Client ID, allowing you to control the amount of resources (such as network bandwidth or CPU) a specific client can utilize. + +**Users** + A User represents the authenticated identity of a client connecting to a cluster. With authentication mechanisms like SASL, users are associated with specific connections. By setting quotas based on Users, resource limits can be enforced per-user. + +Quotas enforcement +------------------- +Quotas enforcement ensures clients stay within their allocated resources. These quotas are implemented and controlled by the brokers on an individual basis. Each client group is assigned a specific quota for every broker, and when this threshold is reached, throttling mechanisms come into action. + +When a client exceeds its quota, the broker calculates the necessary delay to bring the client back within its allocated limits. Subsequently, the broker promptly responds to the client, indicating the duration of the delay. Additionally, the broker suspends communication with the client during this delay period. This cooperative approach from both sides ensures the effective enforcement of quotas. + +Quota violations are swiftly detected using short measurement windows, typically 30 windows of 1 second each. This ensures timely correction and prevents bursts of traffic followed by long delays, providing a better user experience. + +For more information, refer to `Enforcement `_ in the Apache Kafka® official documentation. + +.. 
seealso:: + * :doc:`How to add and manage quotas <../howto/manage-quotas>` diff --git a/docs/products/kafka/concepts/monitor-consumer-group.rst b/docs/products/kafka/concepts/monitor-consumer-group.rst new file mode 100644 index 0000000000..0d4d0c3f34 --- /dev/null +++ b/docs/products/kafka/concepts/monitor-consumer-group.rst @@ -0,0 +1,55 @@ +Monitoring consumer groups in Aiven for Apache Kafka® +====================================================== + +With Aiven for Apache Kafka® dashboards and telemetry, you can monitor the performance and system resources of your Aiven for Apache Kafka service. Aiven provides pre-built dashboards and telemetry for your service, allowing you to collect and visualize telemetry data using InfluxDB® and Grafana®. Aiven streamlines the process by automatically configuring the dashboards for each of your Aiven for Apache Kafka instances. + +This section builds on the :doc:`service integrations ` documentation and provides an in-depth look at consumer group graphs and related key terminology in Aiven for Apache Kafka®. Consumer group graphs offer valuable insights into the behavior of Apache Kafka consumers, which is crucial for maintaining a continuously running production Kafka system. + + +Topics +--------- +In Apache Kafka®, a topic serves as a unique channel for discussions. Producers send messages to the topic while consumers read those messages. For instance, in a topic named `soccer`, you can read what others say about soccer (acting as a consumer) or post messages about soccer (acting as a producer). + +Topic partitions +----------------- +The storage of messages for an Apache Kafka® topic can be spread across one or more topic partitions. For instance, in a topic that has 100 messages and is set up to have 5 partitions, 20 messages would be assigned to each partition. + +Consumer groups +---------------- + +Apache Kafka® allows multiple consumers to read messages from a Kafka topic. 
This improves the message consumption rate and overall performance. Organizing consumers into consumer groups identified by a group ID is common practice. Consumer groups consume messages from a topic with messages spread across multiple partitions. Apache Kafka ensures that each message is consumed by only one consumer, which is essential for certain classes of business applications. + +For example, with a topic having 100 messages across five partitions and five consumers in a consumer group, each consumer will be allocated a distinct partition, consuming 20 messages each. + +If the number of consumers exceeds the number of partitions, extra consumers remain idle until an active consumer exits. Also, a consumer cannot read from a partition not assigned to it. + + +Consumer group telemetry +------------------------- +Aiven for Apache Kafka provides built-in consumer group graphs that offer valuable telemetry to monitor and manage consumer groups effectively. + +Consumer group graph: consumer group replication lag +``````````````````````````````````````````````````````` +Consumer group lag is an important metric in your Apache Kafka dashboard. It shows how far behind the consumers in a group are in consuming messages on the topic. A significant lag could indicate one of two scenarios - terminated consumers or consumers who are alive but unable to keep up with the rate of incoming messages. Persistent lag for long durations may indicate that the system is not behaving according to plan, requiring investigation and follow-up actions to resolve the issue. + +The terms ``Consumer group lag`` and ``Consumer group replication lag`` can be used interchangeably. Consumer Group Lag is typically a metric provided by the client side, while Aiven computes its metric known as Consumer Group Replication Lag (``kafka_consumer_group_rep_lag``) by fetching information about partitions and consumer groups from broker side. 
This metric captures the difference between the latest published offset (high watermark) and the consumer group offset for the same partition. + +The consumer group graph below, which is enabled by default, provides valuable insights into consumer behavior. It displays the consumer group replication lag, indicating how far behind the consumers are in consuming messages from a topic. This graph provides information about consumer behavior, enabling you to take appropriate action if necessary. + + +.. image:: /images/products/kafka/consumer-group-graphs-for-kafka-dashboards.png + :alt: Image of consumer group replication lag + + +Consumer group offset telemetry +````````````````````````````````` +In Apache Kafka, messages are written into a partition as append-only logs and each message is assigned a unique incremental number called the offset. These offsets indicate the exact position of messages within the partition. + +Aiven for Apache Kafka provides offset telemetry, which can help understand message consumption patterns and troubleshoot issues. The ``kafka_consumer_group_offset`` metric identifies the consumer group's most recent committed offset, which can be used to determine its relative position within the assigned partitions. + + + + + + + diff --git a/docs/products/kafka/howto/manage-quotas.rst b/docs/products/kafka/howto/manage-quotas.rst new file mode 100644 index 0000000000..60fc513755 --- /dev/null +++ b/docs/products/kafka/howto/manage-quotas.rst @@ -0,0 +1,51 @@ +Manage quotas +============== +This section provides you with information on how to add and manage quotas for your Aiven for Apache Kafka® service using the `Aiven Console `_. + +For an overview of quotas, see :doc:`Quotas in Aiven for Apache Kafka <../concepts/kafka-quotas>` section for more information. + +.. note:: + To add quotas using APIs, see `Aiven API documentation `_. + +Add quota +------------ + +To add quota to your Aiven for Apache Kafka service, follow these steps: + +1. 
Log in to `Aiven Console `_ and select the Aiven for Apache Kafka service you want to manage. +2. Select the Quotas tab and click **Add quota**. +3. Enter the **Client ID** or **User** for which you want to set the quota. The *Client ID* represents a unique identifier assigned to a Kafka client, while the *User* refers to the user or user group associated with the client. +4. Choose one of the following quota types and enter the desired value for the selected quota type: + + * **Consumer throttle** (quota limit in bytes per second): Specify the maximum data transfer rate allowed for the consumer. + * **Producer throttle** (quota limit in bytes per second): Specify the maximum data transfer rate allowed for the producer. + * **CPU throttle** (quota limit as a percentage): Specify the maximum CPU usage allowed for the client. + + .. note:: + + Aiven also supports **default** quotas, which can be applied to all clients and/or users by using the keyword **default** in either the client ID or user field. + +5. Select **Add** to add quota. + +Additionally, you can add more quotas by selecting the **Add quota** option on the right-side. + +Update quota +-------------- + +To update an existing quota, follow these steps: + +1. Access the **Quotas** tab within the Aiven Console for your Apache Kafka service. +2. Locate the quota you want to update. +3. From the ellipsis menu, select **Update** to open the **Update quota** screen. +4. Modify the quota value as needed. +5. Select **Save changes** to save the changes and update the quota. + +Delete quota +--------------- +To remove a quota, follow these steps: + +1. Access the **Quotas** tab within the Aiven Console for your Apache Kafka service. +2. Locate the quota you want to delete. +3. From the ellipsis menu, select **Delete**. +4. On the confirmation dialog, select **Delete quota** to delete the quota. 
+ diff --git a/docs/products/kafka/kafka-connect/howto/debezium-source-connector-pg.rst b/docs/products/kafka/kafka-connect/howto/debezium-source-connector-pg.rst index e76a20c41c..7623e65e84 100644 --- a/docs/products/kafka/kafka-connect/howto/debezium-source-connector-pg.rst +++ b/docs/products/kafka/kafka-connect/howto/debezium-source-connector-pg.rst @@ -134,10 +134,21 @@ When creating a Debezium source connector pointing to Aiven for PostgreSQL using Caused by: org.postgresql.util.PSQLException: ERROR: must be superuser to create FOR ALL TABLES publication -The error is due to Debezium trying to create a publication and failing because ``avnadmin`` is not a superuser. To avoid the problem you either: +The error is due to Debezium trying to create a publication and failing because ``avnadmin`` is not a superuser. There are 2 different ways of working around this issue: -* add the `"publication.autocreate.mode": "filtered"` parameter to the Debezium connector configuration to enable the publication creation only for the tables defined in the `table.include.list` parameter -* create the publication on the source database before configuring the connector as defined in the section below. +* either add the ``"publication.autocreate.mode": "filtered"`` parameter to the Debezium connector configuration to enable the publication creation only for the tables defined in the ``table.include.list`` parameter +* or create the publication on the source database before configuring the connector as defined in the section further below. + +Note that with older versions of Debezium, there was a bug preventing the addition of more tables to the filter with ``filtered`` mode. As a result, this configuration was not conflicting with a publication ``FOR ALL TABLES``. 
Starting with Debezium 1.9.7, those configurations are conflicting and you could get the following error: + +:: + + Caused by: org.postgresql.util.PSQLException: ERROR: publication "dbz_publication" is defined as FOR ALL TABLES + Detail: Tables cannot be added to or dropped from FOR ALL TABLES publications. + +The error is due to Debezium attempting to include more tables into the publication which is incompatible with ``FOR ALL TABLES``. + +You can get rid of this error by removing ``publication.autocreate.mode`` configuration, which will default to ``all_tables``. In case you want to maintain ``filtered`` mode for some reason, then the publication should be recreated accordingly, as should the replication slot. Create the publication in PostgreSQL '''''''''''''''''''''''''''''''''''' diff --git a/docs/products/kafka/kafka-connect/howto/mqtt-source-connector.rst b/docs/products/kafka/kafka-connect/howto/mqtt-source-connector.rst index aea206f208..da665ba1d8 100644 --- a/docs/products/kafka/kafka-connect/howto/mqtt-source-connector.rst +++ b/docs/products/kafka/kafka-connect/howto/mqtt-source-connector.rst @@ -1,5 +1,5 @@ -Create a sink connector from Apache Kafka® to MQTT -================================================== +Create a source connector from MQTT to Apache Kafka® +======================================================= The `MQTT source connector `_ copies messages from the MQTT topic into Apache Kafka® where they can be transformed and read by multiple consumers. Then, the Stream Reactor MQTT source connector creates a queue and binds it to the ``amq.topic`` defined in the KCQL statement, then messages are copied to the Apache Kafka® service.
diff --git a/docs/products/kafka/kafka-mirrormaker/howto/integrate-external-kafka-cluster.rst b/docs/products/kafka/kafka-mirrormaker/howto/integrate-external-kafka-cluster.rst index de09ef298b..50e471e29e 100644 --- a/docs/products/kafka/kafka-mirrormaker/howto/integrate-external-kafka-cluster.rst +++ b/docs/products/kafka/kafka-mirrormaker/howto/integrate-external-kafka-cluster.rst @@ -17,3 +17,5 @@ An external Apache Kafka® service integration endpoint can be defined in the `A 4. Fill the **Endpoint name**, **Bootstrap servers** and the security settings and click **Create**. 5. The external Apache Kafka cluster is now available under the alias defined in the **Endpoint name** parameter + +.. note:: Configure the ACLs for both the source and target cluster such that the MirrorMaker 2 service can describe and create topics, as well as produce and consume messages. \ No newline at end of file diff --git a/docs/products/opensearch/concepts/backups.rst b/docs/products/opensearch/concepts/backups.rst index 6436a1ebe3..ceb210bcad 100644 --- a/docs/products/opensearch/concepts/backups.rst +++ b/docs/products/opensearch/concepts/backups.rst @@ -1,7 +1,7 @@ .. _opensearch-backup: -Backups -======= +Aiven for OpenSearch® backups +============================= Aiven for OpenSearch® databases are automatically backed up, :doc:`encrypted `, and stored securely in object storage. Backups are stored in the same region as the main service nodes. diff --git a/docs/products/postgresql/concepts/pgvector.rst b/docs/products/postgresql/concepts/pgvector.rst new file mode 100644 index 0000000000..483b773b35 --- /dev/null +++ b/docs/products/postgresql/concepts/pgvector.rst @@ -0,0 +1,69 @@ +pgvector for AI-powered search in Aiven for PostgreSQL® +======================================================= + +In machine learning (ML) models, all data items in a particular data set are mapped into one unified n-dimensional vector space, no matter how big the input data set is. 
This optimized way of data representation allows for high performance of AI algorithms. Mapping regular data into a vector space requires so-called data vectorizing, which is transforming data items into vectors (data structures with at least two components: magnitude and direction). On the vectorized data, you can perform AI-powered operations using different instruments, one of them being pgvector. + +Discover the pgvector extension to Aiven for PostgreSQL® and learn how it works. Check why you might need it and what benefits you get using it. + +About pgvector +-------------- + +pgvector is an open-source vector extension for similarity search. It's available as an extension to your Aiven for PostgreSQL® services. pgvector introduces capabilities to store and search over data of the vector type (ML-generated embeddings). Applying a specific index type for querying a table, the extension enables you to search for a vector's exact nearest or approximate nearest neighbors (data items). + +Vector embeddings +''''''''''''''''' + +In machine learning, real-world objects and concepts (text, images, video, or audio) are represented as a set of continuous numbers residing in a high-dimensional vector space. These numerical representations are called vector embeddings, and the process of transformation into numerical representations is called vector embedding. Vector embedding allows ML algorithms to identify semantic and syntactic relationships between data, find patterns, and make predictions. Vector representations have different applications, for example, information retrieval, image classification, sentiment analysis, natural language processing, or similarity search. + +Vector similarity +''''''''''''''''' + +Since on vector embeddings you can use AI tools for capturing relationships between objects (vector representations), you are also able to identify similarities between them in an easily computable and scalable manner.
+ +A vector usually represents a data point, and components of the vector correspond to attributes of the data point. +In most cases, vector similarity calculations use distance metrics, for example, by measuring the straight-line distance between two vectors or the cosine of the angle between two vectors. The greater the resulting value of the similarity calculation is, the more similar the vectors are, with 0 as the minimum value and 1 as the maximum value. + +How pgvector works +------------------ + +Enabling pgvector + You enable the extension on your database. +Vectorizing data + You generate embeddings for your data, for example, for a products catalog using tools such as the `OpenAI API `_ client. +Storing embeddings + You store the embeddings in Aiven for PostgreSQL using the pgvector extension. +Querying embeddings + You use the embeddings for the vector similarity search on the products catalog. +Adding indices + By default, pgvector executes the *exact* nearest neighbor search, which gives the perfect recall. If you add an index to use the *approximate* nearest neighbor search, you can speed up your search, trading off some recall for performance. + +Why use pgvector +---------------- + +With the pgvector extension, you can perform the vector similarity search and use embedding techniques directly in Aiven for PostgreSQL. pgvector allows for efficient handling of high-dimensional vector data within the Aiven for PostgreSQL database for tasks such as similarity search, model training, data augmentation, or machine learning. + +pgvector helps you optimize and personalize the similarity search experience by improving searching speed and accuracy (also by adding indices). + +Typical use cases +----------------- + +There are multiple industry applications for similarity searches over vector embeddings: + +* e-commerce +* recommendation systems +* fraud detection + +.. 
topic:: Examples + + * AI-powered tools can find similarities between products or transactions, which can be used to produce product recommendations or detect potential scams or frauds. + * Sentiment analysis: words represented with similar vector embeddings have similar sentiment scores. + +What's next +----------- + +:doc:`Enable and use pgvector on Aiven for PostgreSQL® ` + +Related reading +--------------- + +`pgvector README on GitHub `_ diff --git a/docs/products/postgresql/concepts/upgrade-failover.rst b/docs/products/postgresql/concepts/upgrade-failover.rst index ee47f99107..cac7bd60d3 100644 --- a/docs/products/postgresql/concepts/upgrade-failover.rst +++ b/docs/products/postgresql/concepts/upgrade-failover.rst @@ -58,6 +58,9 @@ During maintenance updates, cloud migrations, or plan changes, the below procedu .. Note:: The old primary server is kept alive for a short period of time (minimum 60 seconds) with a TCP forwarding setup pointing to the new primary server allowing clients to connect before learning the new IP address. +.. Note:: + If the service plan is changed from a business plan that has two nodes to a startup plan which only has one node of the same tier (for example, business-8 to startup-8), the standby node is removed while the primary node is retained, and connections to the primary are not affected by the downgrade. Similarly, upgrading the service plan from a startup one to a business one adds a standby node to the service cluster, and connections to the primary node are unaffected. 
+ Recreation of replication slots ------------------------------- diff --git a/docs/products/postgresql/howto.rst b/docs/products/postgresql/howto.rst index 3b963bac71..c7ed4df4f3 100644 --- a/docs/products/postgresql/howto.rst +++ b/docs/products/postgresql/howto.rst @@ -38,6 +38,7 @@ Aiven for PostgreSQL® how-tos - :doc:`Optimize PostgreSQL® slow queries ` - :doc:`Check and avoid transaction ID wraparound ` - :doc:`Prevent PostgreSQL® full disk issues ` + - :doc:`Enable and use pgvector on Aiven for PostgreSQL® ` .. dropdown:: Migration diff --git a/docs/products/postgresql/howto/datasource-integration.rst b/docs/products/postgresql/howto/datasource-integration.rst index 8ef21344e0..8582f8860a 100644 --- a/docs/products/postgresql/howto/datasource-integration.rst +++ b/docs/products/postgresql/howto/datasource-integration.rst @@ -1,14 +1,14 @@ Connect two PostgreSQL® services via datasource integration =========================================================== -There are two types of datasource integrations you can use with Aiven for PostgreSQL®: :doc:`Aiven for Grafana® `, and another Aiven for PostgreSQL® service. If you are connecting two PostgreSQL® services together, perhaps to :doc:`query across them `, but still want to have a restricted IP allow-list, then you will need to use the ``Allow IP-List`` service integration. +There are two types of datasource integrations you can use with Aiven for PostgreSQL®: :doc:`Aiven for Grafana® `, and another Aiven for PostgreSQL® service. If you are connecting two PostgreSQL® services together, perhaps to :doc:`query across them `, but still want to have a restricted IP allow-list, then you will need to use the ``IP Allow-List`` service integration. -Whenever a service node needs to be recycled, e.g. for maintenance, a new node is created with a new IP address. 
As the new IP address cannot be predicted, if you want to maintain a connection between two PostgreSQL services your choices are either to have a very broad IP allow-list (which might be acceptable in the private IP-range of a project VPC) or to use the ``Allow IP-List`` service integration to dynamically create an IP allow-list entry for the other PostgreSQL service. +Whenever a service node needs to be recycled, e.g. for maintenance, a new node is created with a new IP address. As the new IP address cannot be predicted, if you want to maintain a connection between two PostgreSQL services your choices are either to have a very broad IP allow-list (which might be acceptable in the private IP-range of a project VPC) or to use the ``IP Allow-List`` service integration to dynamically create an IP allow-list entry for the other PostgreSQL service. Integrate two PostgreSQL services --------------------------------- -1. On the service overview page for your PostgreSQL service, go to **Manage Integrations** and choose the **Allow IP-List** option. +1. On the service overview page for your PostgreSQL service, go to **Manage Integrations** and choose the **IP Allow-List** option. 2. Choose either a new or existing PostgreSQL service. diff --git a/docs/products/postgresql/howto/list-dba-tasks.rst b/docs/products/postgresql/howto/list-dba-tasks.rst index 646dfdc925..56c167c846 100644 --- a/docs/products/postgresql/howto/list-dba-tasks.rst +++ b/docs/products/postgresql/howto/list-dba-tasks.rst @@ -66,3 +66,7 @@ Database administration tasks .. grid-item-card:: :doc:`Prevent PostgreSQL® full disk issues ` :shadow: md :margin: 2 2 0 0 + + .. 
grid-item-card:: :doc:`Enable and use pgvector on Aiven for PostgreSQL® ` + :shadow: md + :margin: 2 2 0 0 diff --git a/docs/products/postgresql/howto/report-metrics-grafana.rst b/docs/products/postgresql/howto/report-metrics-grafana.rst index 5802dff5d3..ef99ee4492 100644 --- a/docs/products/postgresql/howto/report-metrics-grafana.rst +++ b/docs/products/postgresql/howto/report-metrics-grafana.rst @@ -4,25 +4,22 @@ Monitor PostgreSQL® metrics with Grafana® As well as offering PostgreSQL-as-a-service, the Aiven platform gives you access to monitor the database. The metrics/dashboard integration in the Aiven console lets you send PostgreSQL® metrics to an external endpoint like Datadog or to create an integration and a :doc:`prebuilt dashboard <../reference/pg-metrics>` in Aiven for Grafana®. Get detailed information about the metrics and dashboard sections in :doc:`../reference/pg-metrics`. -Push PostgreSQL metrics to InfluxDB®, M3DB or PostgreSQL --------------------------------------------------------- +Push PostgreSQL metrics to InfluxDB® or M3DB +-------------------------------------------- To collect metrics about your PostgreSQL service you will need to configure a metrics integration and nominate somewhere to store the collected metrics. -1. On the service overview page for your PostgreSQL service, go to "Manage Integrations" and choose the "Metrics" option with "**Send** service metrics to InfluxDB, M3DB or PostgreSQL service" as its description. +1. On the service overview page for your PostgreSQL service, go to "Manage Integrations" and choose the "Metrics" option with "**Send** service metrics to InfluxDB or M3DB service" as its description. -2. Choose either a new or existing InfluxDB®, M3DB or PostgreSQL service. +2. Choose either a new or existing InfluxDB® or M3DB service. - A new service will ask you to select the cloud, region and plan to use. You should also give your service a name. 
The service overview page shows the nodes rebuilding, and then indicates when they are ready. - - If you're already using InfluxDB, M3DB or PostgreSQL on Aiven, you can submit your PostgreSQL metrics to the existing service. - -.. Warning:: - You can send your PostgreSQL service metrics to the same instance. This is not recommended since it increases the load on the monitored system and could also be affected in the event of problems with the database. + - If you're already using InfluxDB or M3DB on Aiven, you can submit your PostgreSQL metrics to the existing service. Provision and configure Grafana ------------------------------- -3. Select the target InfluxDB, M3DB or PostgreSQL database service and go to its service page. Under "Manage Integrations", choose the "Dashboard" option to make the metrics available on that platform. +3. Select the target InfluxDB or M3DB service and go to its service page. Under "Manage Integrations", choose the "Dashboard" option to make the metrics available on that platform. 4. Choose either a new or existing Grafana service. - A new service will ask you to select the cloud, region and plan to use. You should also give your service a name. The service overview page shows the nodes rebuilding, and then indicates when they are ready. diff --git a/docs/products/postgresql/howto/use-pgvector.rst b/docs/products/postgresql/howto/use-pgvector.rst new file mode 100644 index 0000000000..0cdea7446c --- /dev/null +++ b/docs/products/postgresql/howto/use-pgvector.rst @@ -0,0 +1,107 @@ +Enable and use pgvector on Aiven for PostgreSQL® +================================================ + +This article provides step-by-step instructions on enabling, using, and disabling the pgvector extension for your Aiven for PostgreSQL service. + +About using pgvector +-------------------- + +The pgvector extension allows you to perform the vector similarity search and use embedding techniques directly in Aiven for PostgreSQL. 
See :doc:`pgvector for AI-powered search ` for more information on what pgvector is and how it works. + +Prerequisites +------------- + +* Aiven account +* Aiven for PostgreSQL service running on PostgreSQL 13 or newer PostgreSQL versions +* psql and a psql CLI client +* Vector embeddings generated (for example, with the `OpenAI API `_ client) + +Enable pgvector +--------------- + +Run the CREATE EXTENSION statement from a client such as psql connected to your service. This is needed for each database you want to perform the similarity search on. + +1. :doc:`Connect to your Aiven for PostgreSQL service ` using, for example, the psql client (CLI). +2. Connect to your database where you want to operate. + + .. code-block:: bash + + \c database-name + +3. Run the CREATE EXTENSION statement. + + .. code-block:: bash + + CREATE EXTENSION vector; + +Store embeddings +---------------- + +1. Create a table to store the generated vector embeddings. Use the CREATE TABLE SQL command, adjusting the dimensions as needed. + + .. code-block:: bash + + CREATE TABLE items (id bigserial PRIMARY KEY, embedding vector(3)); + + .. note:: + + As a result, the ``items`` table is created. The table includes the ``embedding`` column, which can store vectors with three dimensions. + +2. Run the INSERT statement to store the embeddings generated with, for example, the `OpenAI API `_ client. + + .. code-block:: bash + + INSERT INTO items (embedding) VALUES ('[1,2,3]'), ('[4,5,6]'); + + .. note:: + + As a result, two new rows are inserted into the ``items`` table with the provided embeddings. + +Perform similarity search +------------------------- + +To calculate similarity, run the SELECT statements using the built-in vector operators. + +.. code-block:: bash + + SELECT * FROM items ORDER BY embedding <-> '[3,1,2]' LIMIT 5; + +.. 
note::
+
+   As a result, the query computes the L2 distance between the selected vector and the vectors stored in the ``items`` table, arranges the results based on the calculated distance, and outputs its top five nearest neighbors (most similar items).
+
+.. topic:: Operators for calculating similarity
+
+   * ``<->`` - Euclidean distance (L2 distance)
+   * ``<#>`` - negative inner product
+   * ``<=>`` - cosine distance
+
+Add indices
+-----------
+
+You can add an index on the vector column to use the *approximate* nearest neighbor search (instead of the default *exact* nearest neighbor search). This can improve query performance at a negligible cost to recall. Adding an index is possible for all distance functions (L2 distance, cosine distance, inner product).
+
+To add an index, run a query similar to the following:
+
+.. code-block:: bash
+
+   CREATE INDEX ON items USING ivfflat (embedding vector_l2_ops) WITH (lists = 100);
+
+.. note::
+
+   As a result, the index is added to the ``embedding`` column for the L2 distance function.
+
+Disable pgvector
+----------------
+
+To stop the pgvector extension and remove it from a database, run the following SQL command:
+
+.. code-block:: bash
+
+   DROP EXTENSION vector;
+
+Related reading
+---------------
+
+* :doc:`pgvector for AI-powered search in Aiven for PostgreSQL® `
+* `pgvector README on GitHub `_
diff --git a/docs/products/postgresql/reference/list-of-extensions.rst b/docs/products/postgresql/reference/list-of-extensions.rst
index d256d4acde..98ff081de9 100644
--- a/docs/products/postgresql/reference/list-of-extensions.rst
+++ b/docs/products/postgresql/reference/list-of-extensions.rst
@@ -39,6 +39,9 @@ Data types
 ``ltree`` - https://www.postgresql.org/docs/current/ltree.html
     Data type for hierarchical tree-like structures.
 
+``pgvector`` - https://github.com/pgvector/pgvector
+    Type for vector similarity search.
|PG13onwards| + ``seg`` - https://www.postgresql.org/docs/current/seg.html Data type for representing line segments or floating-point intervals. diff --git a/docs/products/postgresql/reference/pg-connection-limits.rst b/docs/products/postgresql/reference/pg-connection-limits.rst index ba9b24a44f..0028a7bcf2 100644 --- a/docs/products/postgresql/reference/pg-connection-limits.rst +++ b/docs/products/postgresql/reference/pg-connection-limits.rst @@ -8,7 +8,7 @@ Aiven for PostgreSQL® instances limit the number of allowed connections to make * - Plan - Max Connections - * - Hobbyist + * - Hobbyist (Google Cloud, DigitalOcean, and UpCloud only) - 25 * - Startup/Business/Premium-4 - 100 diff --git a/docs/products/redis/howto/configure-acl-permissions.rst b/docs/products/redis/howto/configure-acl-permissions.rst index 7ab7f00a55..804362bc7a 100644 --- a/docs/products/redis/howto/configure-acl-permissions.rst +++ b/docs/products/redis/howto/configure-acl-permissions.rst @@ -1,50 +1,79 @@ Configure ACL permissions in Aiven for Redis®* ============================================== -Use the Aiven console or the Aiven client to create custom Access Control Lists (ACLs). +Redis®* uses `Access Control Lists (ACLs) `_ to restrict the usage of commands and keys based on specific username and password combinations. In Aiven for Redis®*, the direct use of `ACL * `_ commands is not allowed to maintain the reliability of replication, configuration management, and disaster recovery backups for the default user. However, you have the flexibility to create custom ACLs using either the `Aiven Console `_ or the :doc:`Aiven CLI `. -Redis®* uses `ACLs `_ to restrict the usage of commands and keys available for connecting for a specific username and password. Aiven for Redis®*, however, does not allow use of the `ACL * `_ commands directly in order to guarantee the reliability of replication, configuration management, or backups for disaster recovery for the default user. 
You can use the console or the client to create custom ACLs instead. +With the Aiven Console or Aiven CLI, you can customize ACL permissions to align with your requirements. This gives you granular control over access and ensures optimal security within your Aiven for Redis®* service. -Create an ACL using the Aiven console -------------------------------------- +Create user and configure ACLs using console +----------------------------------------------- +Follow the steps below to create a Redis user and configure ACLs: -1. Log in to the `Aiven web console `_. +1. Log in to `Aiven Console `_ and select your Aiven for Redis service from the list of available services. +2. From the **Overview** page of your Redis service, navigate to the **Users** tab. +3. Select **Create user**, and provide the following details: + + * **Username:** Specify a username for the user. + * **Categories:** Specify the command categories the user can access within Aiven for Redis. For example, you can use the prefix ``+@all`` or a similar convention to grant users access to all categories. Separate each category entry with a single space. + * **Commands:** Specify the commands the user can execute, separating each command by a single space. For example, you can enter ``+set -get`` to grant the user permission to execute the SET command and deny access to the GET command. + * **Channels:** Specify the channels the user can access within the Publish/Subscribe (Pub/Sub) messaging pattern. Separate each channel entry with a single space. + * **Keys:** Specify the keys the user can interact with. For example, you can specify keys like ``user:123`` or ``product:456``, or ``order:789`` to grant the user access to interact with these specific keys in Aiven for Redis. + +4. Once you have defined the ACL permissions for the user, select **Save** to create the user. -2. From the *Services* page, select the Redis service you want to create an ACL for. - The *Overview* page for the service opens. 
+User management +---------------- +You have various management options available for Aiven for Redis users. Follow the instructions below for each operation: -3. Click the **Users and ACL**. +Reset password +````````````````` +1. In the **Users** tab, locate the user you want to reset the password and select the ellipses next to their row. +2. Select **Reset password** from the drop-down menu. +3. Confirm the password reset by selecting **Reset** on the confirmation screen. -4. Click **+ Add Service User**. +Edit ACL rules +``````````````` +1. In the **Users** tab, locate the user you want to edit ACL rules and select the ellipses next to their row. +2. Select **Edit ACL rules** from the drop-down menu. +3. Make the desired changes to the ACL rules on the **Edit access control** screen. +4. Select the **Save** to apply the modifications. - The *New Redis User* pop-up opens. +Duplicate user +``````````````` +1. In the **Users** tab, locate the user you want to duplicate and select the icon next to their row. +2. Select **Duplicate user** from the options in the drop-down menu. +3. Enter a name for the new user in the **Duplicate user** screen. +4. Click on the **Add user** button to create a duplicate user. -5. Create a user, and define which **Keys**, **Categories**, **Commands** or **Channels** the user can access. +Delete user +````````````` +1. Locate the user you want to delete from the user list and select the icon next to their row. +2. Select **Delete** from the options in the drop-down menu. +3. Confirm the deletion by selecting **Delete** on the confirmation screen. - In this example, the ``test`` user can only retrieve keys with the pattern ``mykeys.*``. - .. image:: /images/products/redis/redis-acl.png - :alt: Screenshot of the ACL configuration screen +Create user and configure ACLs using Aiven CLI +----------------------------------------------- -6. Click **Save**. +To create a user and configure ACLs using the Aiven CLI, follow these steps: +1. 
Set up the :doc:`CLI tool `. -Create an ACL using the Aiven CLI ---------------------------------- +2. Create a user named ``mynewuser`` with read-only access to the ``mykeys.*`` keys using the following command: -1. Set up the :doc:`CLI tool ` if you don't have it already. + :: -2. Create a user for ``mynewuser`` with read-only access to the ``mykeys.*`` keys:: + avn service user-create --project myproject myservicename --username mynewuser --redis-acl-keys 'mykeys.*' --redis-acl-commands '+get' --redis-acl-categories '' - avn service user-create --project myproject myservicename --username mynewuser --redis-acl-keys 'mykeys.*' --redis-acl-commands '+get' --redis-acl-categories '' +3. Confirm the ACL is applied by connecting to the service using the new username and password: + + :: -3. Confirm the ACL is applied by connecting to the service using the new username and password:: + redis-cli --user mynewuser --pass ... --tls -h myservice-myproject.aivencloud.com -p 12719 - redis-cli --user mynewuser --pass ... 
--tls -h myservice-myproject.aivencloud.com -p 12719 - - myservice-myproject.aivencloud.com:12719> get mykeys.hello - (nil) - myservice-myproject.aivencloud.com:12719> set mykeys.hello world - (error) NOPERM this user has no permissions to run the 'set' command or its subcommand + myservice-myproject.aivencloud.com:12719> get mykeys.hello + (nil) + myservice-myproject.aivencloud.com:12719> set mykeys.hello world + (error) NOPERM this user has no permissions to run the 'set' command or its subcommand diff --git a/images/platform/concepts/backup_location_preview.png b/images/platform/concepts/backup_location_preview.png new file mode 100644 index 0000000000..07e0ed1bde Binary files /dev/null and b/images/platform/concepts/backup_location_preview.png differ diff --git a/images/platform/howto/aws-marketplace-listing.png b/images/platform/howto/aws-marketplace-listing.png new file mode 100644 index 0000000000..7b20b5efe1 Binary files /dev/null and b/images/platform/howto/aws-marketplace-listing.png differ diff --git a/images/products/kafka/consumer-group-graphs-for-kafka-dashboards.png b/images/products/kafka/consumer-group-graphs-for-kafka-dashboards.png new file mode 100644 index 0000000000..7ae6cd2598 Binary files /dev/null and b/images/products/kafka/consumer-group-graphs-for-kafka-dashboards.png differ diff --git a/includes/clouds-list.rst b/includes/clouds-list.rst index 0ee77acf43..7260ff68e7 100644 --- a/includes/clouds-list.rst +++ b/includes/clouds-list.rst @@ -26,6 +26,9 @@ Amazon Web Services * - Asia-Pacific - ``aws-ap-south-1`` - Asia, India + * - Asia-Pacific + - ``aws-ap-south-2`` + - Asia, India * - Asia-Pacific - ``aws-ap-southeast-1`` - Asia, Singapore @@ -35,15 +38,24 @@ Amazon Web Services * - Australia - ``aws-ap-southeast-2`` - Australia, New South Wales + * - Australia + - ``aws-ap-southeast-4`` + - Australia, Melbourne * - Europe - ``aws-eu-central-1`` - Europe, Germany + * - Europe + - ``aws-eu-central-2`` + - Europe, Switzerland * - Europe - 
``aws-eu-north-1`` - Europe, Sweden * - Europe - ``aws-eu-south-1`` - Europe, Italy + * - Europe + - ``aws-eu-south-2`` + - Europe, Spain * - Europe - ``aws-eu-west-1`` - Europe, Ireland @@ -143,6 +155,9 @@ Azure * - Europe - ``azure-norway-west`` - Europe, Norway + * - Europe + - ``azure-sweden-central`` + - Europe, Gävle * - Europe - ``azure-switzerland-north`` - Europe, Switzerland @@ -155,6 +170,9 @@ Azure * - Europe - ``azure-westeurope`` - Europe, Netherlands + * - Middle East + - ``azure-qatar-central`` + - Middle East, Doha * - Middle East - ``azure-uae-north`` - Middle East, United Arab Emirates @@ -188,6 +206,9 @@ Azure * - North America - ``azure-westus2`` - United States, Washington + * - North America + - ``azure-westus3`` + - United States, Phoenix * - South America - ``azure-brazilsouth`` - South America, Brazil diff --git a/includes/config-cassandra.rst b/includes/config-cassandra.rst index 17e4a02e15..9902e1f044 100644 --- a/includes/config-cassandra.rst +++ b/includes/config-cassandra.rst @@ -117,3 +117,19 @@ +``backup_hour`` +--------------- +*['integer', 'null']* + +**Hour of the day (in UTC) when the backup for the service is started. A new backup is only started if the previous backup has already completed.** + + + +``backup_minute`` +----------------- +*['integer', 'null']* + +**Minute of the hour when the backup for the service is started. A new backup is only started if the previous backup has already completed.** + + + diff --git a/includes/config-kafka.rst b/includes/config-kafka.rst index b64f5634fe..9ee3189596 100644 --- a/includes/config-kafka.rst +++ b/includes/config-kafka.rst @@ -575,6 +575,12 @@ **producer.linger.ms** Wait for up to the given delay to allow batching records together +``producer_max_request_size`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +*integer* + +**producer.max.request.size** The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. 
+ ``consumer_enable_auto_commit`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ *boolean* diff --git a/index.rst b/index.rst index 6017d254a3..09add91bb6 100644 --- a/index.rst +++ b/index.rst @@ -37,7 +37,6 @@ Get started with Aiven's fully-managed services. .. button-link:: docs/products/kafka - :align: right :color: primary :outline: @@ -53,7 +52,6 @@ Get started with Aiven's fully-managed services. Framework for definining powerful transformations of batch and streaming data sets. .. button-link:: docs/products/flink - :align: right :color: primary :outline: @@ -68,7 +66,6 @@ Get started with Aiven's fully-managed services. High performance storage solution for large data quantities. This specialist data solution is a partitioned row store. .. button-link:: docs/products/cassandra - :align: right :color: primary :outline: @@ -84,7 +81,6 @@ Get started with Aiven's fully-managed services. A highly scalable, open source database that uses a column-oriented structure. .. button-link:: docs/products/clickhouse - :align: right :color: primary :outline: @@ -99,7 +95,6 @@ Get started with Aiven's fully-managed services. The visualization tool you need to explore and understand your data. Grafana integrates with the other services in just a few clicks. .. button-link:: docs/products/grafana - :align: right :color: primary :outline: @@ -115,7 +110,6 @@ Get started with Aiven's fully-managed services. Specialist time series database, with good tooling support. .. button-link:: docs/products/influxdb - :align: right :color: primary :outline: @@ -130,7 +124,6 @@ Get started with Aiven's fully-managed services. Distributed time-series database for scalable solutions, with M3 Coordinator included, and M3 Aggregator also available. .. button-link:: docs/products/m3db - :align: right :color: primary :outline: @@ -146,7 +139,6 @@ Get started with Aiven's fully-managed services. Popular and much-loved relational database platform. .. 
button-link:: docs/products/mysql - :align: right :color: primary :outline: @@ -161,7 +153,6 @@ Get started with Aiven's fully-managed services. Document database with specialist search features, bring your freeform documents, logs or metrics, and make sense of them here. .. button-link:: docs/products/opensearch - :align: right :color: primary :outline: @@ -177,7 +168,6 @@ Get started with Aiven's fully-managed services. Powerful relational database platform. We have the latest versions, and an excellent selection of extensions. .. button-link:: docs/products/postgresql - :align: right :color: primary :outline: @@ -192,7 +182,6 @@ Get started with Aiven's fully-managed services. In-memory data store for all your high-peformance short-term storage and caching needs. .. button-link:: docs/products/redis - :align: right :color: primary :outline: @@ -218,7 +207,6 @@ Interfaces Web-based graphical interface for creating and managing your services. .. button-link:: docs/tools/aiven-console - :align: center :color: primary :outline: @@ -233,7 +221,6 @@ Interfaces Command line client for the Aiven platform. .. button-link:: docs/tools/cli - :align: center :color: primary :outline: @@ -253,7 +240,6 @@ Automation A public API you can use for programmatic integrations. .. button-link:: docs/tools/api - :align: right :color: primary :outline: @@ -268,7 +254,6 @@ Automation An infrastructure-as-code tool for lifecycle management of your Aiven resources. .. button-link:: docs/tools/terraform - :align: right :color: primary :outline: @@ -283,7 +268,6 @@ Automation Provision and manage Aiven services from your Kubernetes cluster. .. 
button-link:: https://docs.aiven.io/docs/tools/kubernetes.html - :align: right :color: primary :outline: diff --git a/requirements.txt b/requirements.txt index b38d755b37..5d2efb7705 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,8 +7,9 @@ sphinx-external-toc==0.2.3 sphinx-copybutton==0.5.0 sphinx_gitstamp==0.3.2 beautifulsoup4==4.9.3 +lxml==4.9.2 opensearch-py==1.0.0 -requests==2.25.1 +requests==2.31.0 sphinxext-opengraph==0.4.2 sphinx-sitemap==2.2.0 sphinx-notfound-page==0.8 diff --git a/scripts/postprocess_sitemap.py b/scripts/postprocess_sitemap.py new file mode 100644 index 0000000000..e26434b73a --- /dev/null +++ b/scripts/postprocess_sitemap.py @@ -0,0 +1,25 @@ +from bs4 import BeautifulSoup + +with open('./_build/html/sitemap.xml', 'r') as f: + contents = f.read() + +soup = BeautifulSoup(contents, 'xml') + +urls = soup.find_all('url') + +for url in urls: + loc = url.find('loc') + text = loc.string + # Remove the 'gen' and '404' pages + if '404' in text: + url.decompose() + continue + if text.endswith('genindex.html'): + loc.string = text[:-5] # removes the ".html" + elif text.endswith('index.html'): + loc.string = text[:-10] # removes the "index.html" + elif text.endswith('.html'): + loc.string = text[:-5] # removes the ".html" + +with open('./_build/html/sitemap.xml', 'w') as f: + f.write(str(soup))