diff --git a/DO_OPENAPI_COMMIT_SHA.txt b/DO_OPENAPI_COMMIT_SHA.txt index 7f659175..c3d46f6e 100644 --- a/DO_OPENAPI_COMMIT_SHA.txt +++ b/DO_OPENAPI_COMMIT_SHA.txt @@ -1 +1 @@ -6f7c147 +f4200bf diff --git a/src/pydo/aio/operations/_operations.py b/src/pydo/aio/operations/_operations.py index 09d144f7..c1b95574 100644 --- a/src/pydo/aio/operations/_operations.py +++ b/src/pydo/aio/operations/_operations.py @@ -79,8 +79,10 @@ build_databases_add_request, build_databases_add_user_request, build_databases_create_cluster_request, + build_databases_create_kafka_topic_request, build_databases_create_replica_request, build_databases_delete_connection_pool_request, + build_databases_delete_kafka_topic_request, build_databases_delete_online_migration_request, build_databases_delete_request, build_databases_delete_user_request, @@ -91,6 +93,7 @@ build_databases_get_config_request, build_databases_get_connection_pool_request, build_databases_get_eviction_policy_request, + build_databases_get_kafka_topic_request, build_databases_get_migration_status_request, build_databases_get_replica_request, build_databases_get_request, @@ -100,6 +103,7 @@ build_databases_list_clusters_request, build_databases_list_connection_pools_request, build_databases_list_firewall_rules_request, + build_databases_list_kafka_topics_request, build_databases_list_options_request, build_databases_list_replicas_request, build_databases_list_request, @@ -111,6 +115,7 @@ build_databases_update_connection_pool_request, build_databases_update_eviction_policy_request, build_databases_update_firewall_rules_request, + build_databases_update_kafka_topic_request, build_databases_update_maintenance_window_request, build_databases_update_major_version_request, build_databases_update_online_migration_request, @@ -68371,6 +68376,30 @@ async def list_options(self, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { "options": { + "kafka": { + "layouts": [ + { + "num_nodes": 0, # Optional. An array of + objects, each indicating the node sizes (otherwise referred to as + slugs) that are available with various numbers of nodes in the + database cluster. Each slugs denotes the node's identifier, CPU, + and RAM (in that order). + "sizes": [ + "str" # Optional. An array of + objects containing the slugs available with various node + counts. + ] + } + ], + "regions": [ + "str" # Optional. An array of strings containing the + names of available regions. + ], + "versions": [ + "str" # Optional. An array of strings containing the + names of available regions. + ] + }, "mongodb": { "layouts": [ { @@ -68469,6 +68498,19 @@ async def list_options(self, **kwargs: Any) -> JSON: } }, "version_availability": { + "kafka": [ + { + "end_of_availability": "str", # Optional. A + timestamp referring to the date when the particular version will no + longer be available for creating new clusters. If null, the version + does not have an end of availability timeline. + "end_of_life": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be + supported. If null, the version does not have an end of life + timeline. + "version": "str" # Optional. The engine version. + } + ], "mongodb": [ { "end_of_availability": "str", # Optional. A @@ -68663,8 +68705,9 @@ async def list_clusters( ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, - "mysql" for MySQL, "redis" for Redis, and "mongodb" for MongoDB. - Required. 
Known values are: "pg", "mysql", "redis", and "mongodb". + "mysql" for MySQL, "redis" for Redis, "mongodb" for MongoDB, and "kafka" + for Kafka. Required. Known values are: "pg", "mysql", "redis", "mongodb", + and "kafka". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -68743,6 +68786,10 @@ async def list_clusters( ], "users": [ { + "access_cert": "str", # Optional. Access + certificate for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key + for TLS client authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for @@ -68758,10 +68805,31 @@ async def list_clusters( user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An + identifier for the ACL. Required. + "permission": "str", + # Permission set applied to the ACL. 'consume' allows + for messages to be consumed from the topic. 'produce' + allows for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for + 'produceconsume' as well as any operations to + administer the topic (delete, update). Required. + Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str" # A + regex for matching the topic(s) that this ACL should + apply to. Required. + } + ] + } } ], "version": "str", # Optional. A string representing the @@ -68925,8 +68993,8 @@ async def create_cluster( ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, "redis" - for Redis, and "mongodb" for MongoDB. Required. Known values are: "pg", "mysql", - "redis", and "mongodb". + for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. Known values + are: "pg", "mysql", "redis", "mongodb", and "kafka". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -68998,6 +69066,10 @@ async def create_cluster( ], "users": [ { + "access_cert": "str", # Optional. Access certificate for TLS + client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -69010,9 +69082,28 @@ async def create_cluster( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database - user's role. The value will be either"n"primary" or "normal". Known - values are: "primary" and "normal". + "role": "str", # Optional. A string representing the + database user's role. The value will be either"n"primary" or "normal". + Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the + ACL. Required. + "permission": "str", # Permission + set applied to the ACL. 'consume' allows for messages to be + consumed from the topic. 
'produce' allows for messages to be + published to the topic. 'produceconsume' allows for both + 'consume' and 'produce' permission. 'admin' allows for + 'produceconsume' as well as any operations to administer the + topic (delete, update). Required. Known values are: "admin", + "consume", "produce", and "produceconsume". + "topic": "str" # A regex for + matching the topic(s) that this ACL should apply to. + Required. + } + ] + } } ], "version": "str", # Optional. A string representing the version of the @@ -69053,366 +69144,8 @@ async def create_cluster( ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Redis, and "mongodb" for MongoDB. Required. Known values are: - "pg", "mysql", "redis", and "mongodb". - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database cluster. - "maintenance_window": { - "day": "str", # The day of the week on which to apply - maintenance updates. Required. - "description": [ - "str" # Optional. A list of strings, each containing - information about a pending maintenance update. - ], - "hour": "str", # The hour in UTC at which maintenance - updates will be applied in 24 hour format. Required. - "pending": bool # Optional. A boolean value indicating - whether any maintenance is scheduled to be performed in the next window. - }, - "name": "str", # A unique, human-readable name referring to a - database cluster. Required. - "num_nodes": 0, # The number of nodes in the database cluster. - Required. - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the database. - }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the database cluster will be assigned. If excluded, - the cluster when creating a new database cluster, it will be assigned to your - account's default VPC for the region. - "project_id": "str", # Optional. The ID of the project that the - database cluster is assigned to. If excluded when creating a new database - cluster, it will be assigned to your default project. - "region": "str", # The slug identifier for the region where the - database cluster is located. Required. - "rules": [ - { - "cluster_uuid": "str", # Optional. A unique ID for - the database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A - time value given in ISO8601 combined date and time format that - represents when the firewall rule was created. - "type": "str", # The type of resource that the - firewall rule allows to access the database cluster. Required. Known - values are: "droplet", "k8s", "ip_addr", "tag", and "app". - "uuid": "str", # Optional. A unique ID for the - firewall rule itself. 
- "value": "str" # The ID of the specific resource, - the name of a tag applied to a group of resources, or the IP address - that the firewall rule allows to access the database cluster. - Required. - } - ], - "semantic_version": "str", # Optional. A string representing the - semantic version of the database engine in use for the cluster. - "size": "str", # The slug identifier representing the size of the - nodes in the database cluster. Required. - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "tags": [ - "str" # Optional. An array of tags that have been applied to - the database cluster. - ], - "users": [ - { - "mysql_settings": { - "auth_plugin": "str" # A string specifying - the authentication method to be used for connections"nto the - MySQL user account. The valid values are - ``mysql_native_password``"nor ``caching_sha2_password``. If - excluded when creating a new user, the"ndefault for the version - of MySQL in use will be used. As of MySQL 8.0, the"ndefault is - ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "name": "str", # The name of a database user. - Required. - "password": "str", # Optional. A randomly generated - password for the database user. - "role": "str" # Optional. A string representing the - database user's role. The value will be either"n"primary" or - "normal". Known values are: "primary" and "normal". - } - ], - "version": "str", # Optional. A string representing the version of - the database engine in use for the cluster. - "version_end_of_availability": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be available - for creating new clusters. If null, the version does not have an end of - availability timeline. - "version_end_of_life": "str" # Optional. A timestamp referring to - the date when the particular version will no longer be supported. If null, - the version does not have an end of life timeline. - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - async def create_cluster( - self, body: IO, *, content_type: str = "application/json", **kwargs: Any - ) -> JSON: - """Create a New Database Cluster. - - To create a database cluster, send a POST request to ``/v2/databases``. - The response will be a JSON object with a key called ``database``. The value of this will be an - object that contains the standard attributes associated with a database cluster. The initial - value of the database cluster's ``status`` attribute will be ``creating``. When the cluster is - ready to receive traffic, this will transition to ``online``. - The embedded ``connection`` and ``private_connection`` objects will contain the information - needed to access the database cluster. - DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. 
To - create a new database cluster based on a backup of an existing cluster, send a POST request to - ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must - include a key named ``backup_restore`` with the name of the original database cluster and the - timestamp of the backup to be restored. Creating a database from a backup is the same as - forking a database in the control panel. - Note: Backups are not supported for Redis clusters. - - :param body: Required. - :type body: IO - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 201 - response == { - "database": { - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the database. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "db_names": [ - "str" # Optional. An array of strings containing the names - of databases created in the database cluster. - ], - "engine": "str", # A slug representing the database engine used for - the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Redis, and "mongodb" for MongoDB. Required. Known values are: - "pg", "mysql", "redis", and "mongodb". - "id": "str", # Optional. A unique ID that can be used to identify - and reference a database cluster. - "maintenance_window": { - "day": "str", # The day of the week on which to apply - maintenance updates. Required. - "description": [ - "str" # Optional. A list of strings, each containing - information about a pending maintenance update. - ], - "hour": "str", # The hour in UTC at which maintenance - updates will be applied in 24 hour format. Required. - "pending": bool # Optional. A boolean value indicating - whether any maintenance is scheduled to be performed in the next window. - }, - "name": "str", # A unique, human-readable name referring to a - database cluster. Required. - "num_nodes": 0, # The number of nodes in the database cluster. - Required. - "private_connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. 
This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the database. - }, - "private_network_uuid": "str", # Optional. A string specifying the - UUID of the VPC to which the database cluster will be assigned. If excluded, - the cluster when creating a new database cluster, it will be assigned to your - account's default VPC for the region. - "project_id": "str", # Optional. The ID of the project that the - database cluster is assigned to. If excluded when creating a new database - cluster, it will be assigned to your default project. - "region": "str", # The slug identifier for the region where the - database cluster is located. Required. - "rules": [ - { - "cluster_uuid": "str", # Optional. A unique ID for - the database cluster to which the rule is applied. - "created_at": "2020-02-20 00:00:00", # Optional. A - time value given in ISO8601 combined date and time format that - represents when the firewall rule was created. - "type": "str", # The type of resource that the - firewall rule allows to access the database cluster. Required. Known - values are: "droplet", "k8s", "ip_addr", "tag", and "app". - "uuid": "str", # Optional. A unique ID for the - firewall rule itself. - "value": "str" # The ID of the specific resource, - the name of a tag applied to a group of resources, or the IP address - that the firewall rule allows to access the database cluster. - Required. - } - ], - "semantic_version": "str", # Optional. A string representing the - semantic version of the database engine in use for the cluster. - "size": "str", # The slug identifier representing the size of the - nodes in the database cluster. Required. - "status": "str", # Optional. A string representing the current - status of the database cluster. Known values are: "creating", "online", - "resizing", "migrating", and "forking". - "tags": [ - "str" # Optional. An array of tags that have been applied to - the database cluster. - ], - "users": [ - { - "mysql_settings": { - "auth_plugin": "str" # A string specifying - the authentication method to be used for connections"nto the - MySQL user account. The valid values are - ``mysql_native_password``"nor ``caching_sha2_password``. If - excluded when creating a new user, the"ndefault for the version - of MySQL in use will be used. As of MySQL 8.0, the"ndefault is - ``caching_sha2_password``. Required. Known values are: - "mysql_native_password" and "caching_sha2_password". - }, - "name": "str", # The name of a database user. - Required. - "password": "str", # Optional. A randomly generated - password for the database user. - "role": "str" # Optional. A string representing the - database user's role. The value will be either"n"primary" or - "normal". Known values are: "primary" and "normal". - } - ], - "version": "str", # Optional. A string representing the version of - the database engine in use for the cluster. - "version_end_of_availability": "str", # Optional. A timestamp - referring to the date when the particular version will no longer be available - for creating new clusters. If null, the version does not have an end of - availability timeline. - "version_end_of_life": "str" # Optional. A timestamp referring to - the date when the particular version will no longer be supported. If null, - the version does not have an end of life timeline. 
- } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace_async - async def create_cluster(self, body: Union[JSON, IO], **kwargs: Any) -> JSON: - """Create a New Database Cluster. - - To create a database cluster, send a POST request to ``/v2/databases``. - The response will be a JSON object with a key called ``database``. The value of this will be an - object that contains the standard attributes associated with a database cluster. The initial - value of the database cluster's ``status`` attribute will be ``creating``. When the cluster is - ready to receive traffic, this will transition to ``online``. - The embedded ``connection`` and ``private_connection`` objects will contain the information - needed to access the database cluster. - DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To - create a new database cluster based on a backup of an existing cluster, send a POST request to - ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must - include a key named ``backup_restore`` with the name of the original database cluster and the - timestamp of the backup to be restored. Creating a database from a backup is the same as - forking a database in the control panel. - Note: Backups are not supported for Redis clusters. - - :param body: Is either a model type or a IO type. Required. - :type body: JSON or IO - :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. - Default value is None. - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 201 - response == { - "database": { - "connection": { - "database": "str", # Optional. The name of the default - database. - "host": "str", # Optional. The FQDN pointing to the database - cluster's current primary node. - "password": "str", # Optional. The randomly generated - password for the default user. - "port": 0, # Optional. The port on which the database - cluster is listening. - "ssl": bool, # Optional. A boolean value indicating if the - connection should be made over SSL. - "uri": "str", # Optional. A connection string in the format - accepted by the ``psql`` command. This is provided as a convenience and - should be able to be constructed by the other attributes. - "user": "str" # Optional. The default user for the database. - }, - "created_at": "2020-02-20 00:00:00", # Optional. A time value given - in ISO8601 combined date and time format that represents when the database - cluster was created. - "db_names": [ - "str" # Optional. An array of strings containing the names - of databases created in the database cluster. - ], - "engine": "str", # A slug representing the database engine used for - the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Redis, and "mongodb" for MongoDB. Required. 
Known values are: - "pg", "mysql", "redis", and "mongodb". + "redis" for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. + Known values are: "pg", "mysql", "redis", "mongodb", and "kafka". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -69487,6 +69220,10 @@ async def create_cluster(self, body: Union[JSON, IO], **kwargs: Any) -> JSON: ], "users": [ { + "access_cert": "str", # Optional. Access certificate + for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS + client authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the @@ -69501,9 +69238,435 @@ async def create_cluster(self, body: Union[JSON, IO], **kwargs: Any) -> JSON: Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier + for the ACL. Required. + "permission": "str", # + Permission set applied to the ACL. 'consume' allows for + messages to be consumed from the topic. 'produce' allows + for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' + permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, + update). Required. Known values are: "admin", "consume", + "produce", and "produceconsume". + "topic": "str" # A regex for + matching the topic(s) that this ACL should apply to. + Required. + } + ] + } + } + ], + "version": "str", # Optional. A string representing the version of + the database engine in use for the cluster. + "version_end_of_availability": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be available + for creating new clusters. If null, the version does not have an end of + availability timeline. + "version_end_of_life": "str" # Optional. A timestamp referring to + the date when the particular version will no longer be supported. If null, + the version does not have an end of life timeline. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def create_cluster( + self, body: IO, *, content_type: str = "application/json", **kwargs: Any + ) -> JSON: + """Create a New Database Cluster. + + To create a database cluster, send a POST request to ``/v2/databases``. + The response will be a JSON object with a key called ``database``. The value of this will be an + object that contains the standard attributes associated with a database cluster. The initial + value of the database cluster's ``status`` attribute will be ``creating``. 
When the cluster is + ready to receive traffic, this will transition to ``online``. + The embedded ``connection`` and ``private_connection`` objects will contain the information + needed to access the database cluster. + DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To + create a new database cluster based on a backup of an existing cluster, send a POST request to + ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must + include a key named ``backup_restore`` with the name of the original database cluster and the + timestamp of the backup to be restored. Creating a database from a backup is the same as + forking a database in the control panel. + Note: Backups are not supported for Redis clusters. + + :param body: Required. + :type body: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "database": { + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the database. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "db_names": [ + "str" # Optional. An array of strings containing the names + of databases created in the database cluster. + ], + "engine": "str", # A slug representing the database engine used for + the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, + "redis" for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. + Known values are: "pg", "mysql", "redis", "mongodb", and "kafka". + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database cluster. + "maintenance_window": { + "day": "str", # The day of the week on which to apply + maintenance updates. Required. + "description": [ + "str" # Optional. A list of strings, each containing + information about a pending maintenance update. + ], + "hour": "str", # The hour in UTC at which maintenance + updates will be applied in 24 hour format. Required. + "pending": bool # Optional. A boolean value indicating + whether any maintenance is scheduled to be performed in the next window. + }, + "name": "str", # A unique, human-readable name referring to a + database cluster. Required. + "num_nodes": 0, # The number of nodes in the database cluster. + Required. + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. 
The randomly generated + password for the default user. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the database. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the database cluster will be assigned. If excluded, + the cluster when creating a new database cluster, it will be assigned to your + account's default VPC for the region. + "project_id": "str", # Optional. The ID of the project that the + database cluster is assigned to. If excluded when creating a new database + cluster, it will be assigned to your default project. + "region": "str", # The slug identifier for the region where the + database cluster is located. Required. + "rules": [ + { + "cluster_uuid": "str", # Optional. A unique ID for + the database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A + time value given in ISO8601 combined date and time format that + represents when the firewall rule was created. + "type": "str", # The type of resource that the + firewall rule allows to access the database cluster. Required. Known + values are: "droplet", "k8s", "ip_addr", "tag", and "app". + "uuid": "str", # Optional. A unique ID for the + firewall rule itself. + "value": "str" # The ID of the specific resource, + the name of a tag applied to a group of resources, or the IP address + that the firewall rule allows to access the database cluster. + Required. + } + ], + "semantic_version": "str", # Optional. A string representing the + semantic version of the database engine in use for the cluster. + "size": "str", # The slug identifier representing the size of the + nodes in the database cluster. Required. + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "tags": [ + "str" # Optional. An array of tags that have been applied to + the database cluster. + ], + "users": [ + { + "access_cert": "str", # Optional. Access certificate + for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS + client authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying + the authentication method to be used for connections"nto the + MySQL user account. The valid values are + ``mysql_native_password``"nor ``caching_sha2_password``. If + excluded when creating a new user, the"ndefault for the version + of MySQL in use will be used. As of MySQL 8.0, the"ndefault is + ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "name": "str", # The name of a database user. + Required. + "password": "str", # Optional. A randomly generated + password for the database user. + "role": "str", # Optional. A string representing the + database user's role. The value will be either"n"primary" or + "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier + for the ACL. Required. + "permission": "str", # + Permission set applied to the ACL. 
'consume' allows for + messages to be consumed from the topic. 'produce' allows + for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' + permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, + update). Required. Known values are: "admin", "consume", + "produce", and "produceconsume". + "topic": "str" # A regex for + matching the topic(s) that this ACL should apply to. + Required. + } + ] + } + } + ], + "version": "str", # Optional. A string representing the version of + the database engine in use for the cluster. + "version_end_of_availability": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be available + for creating new clusters. If null, the version does not have an end of + availability timeline. + "version_end_of_life": "str" # Optional. A timestamp referring to + the date when the particular version will no longer be supported. If null, + the version does not have an end of life timeline. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def create_cluster(self, body: Union[JSON, IO], **kwargs: Any) -> JSON: + """Create a New Database Cluster. + + To create a database cluster, send a POST request to ``/v2/databases``. + The response will be a JSON object with a key called ``database``. The value of this will be an + object that contains the standard attributes associated with a database cluster. The initial + value of the database cluster's ``status`` attribute will be ``creating``. When the cluster is + ready to receive traffic, this will transition to ``online``. + The embedded ``connection`` and ``private_connection`` objects will contain the information + needed to access the database cluster. + DigitalOcean managed PostgreSQL and MySQL database clusters take automated daily backups. To + create a new database cluster based on a backup of an existing cluster, send a POST request to + ``/v2/databases``. In addition to the standard database cluster attributes, the JSON body must + include a key named ``backup_restore`` with the name of the original database cluster and the + timestamp of the backup to be restored. Creating a database from a backup is the same as + forking a database in the control panel. + Note: Backups are not supported for Redis clusters. + + :param body: Is either a model type or a IO type. Required. + :type body: JSON or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "database": { + "connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. 
The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the database. + }, + "created_at": "2020-02-20 00:00:00", # Optional. A time value given + in ISO8601 combined date and time format that represents when the database + cluster was created. + "db_names": [ + "str" # Optional. An array of strings containing the names + of databases created in the database cluster. + ], + "engine": "str", # A slug representing the database engine used for + the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, + "redis" for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. + Known values are: "pg", "mysql", "redis", "mongodb", and "kafka". + "id": "str", # Optional. A unique ID that can be used to identify + and reference a database cluster. + "maintenance_window": { + "day": "str", # The day of the week on which to apply + maintenance updates. Required. + "description": [ + "str" # Optional. A list of strings, each containing + information about a pending maintenance update. + ], + "hour": "str", # The hour in UTC at which maintenance + updates will be applied in 24 hour format. Required. + "pending": bool # Optional. A boolean value indicating + whether any maintenance is scheduled to be performed in the next window. + }, + "name": "str", # A unique, human-readable name referring to a + database cluster. Required. + "num_nodes": 0, # The number of nodes in the database cluster. + Required. + "private_connection": { + "database": "str", # Optional. The name of the default + database. + "host": "str", # Optional. The FQDN pointing to the database + cluster's current primary node. + "password": "str", # Optional. The randomly generated + password for the default user. + "port": 0, # Optional. The port on which the database + cluster is listening. + "ssl": bool, # Optional. A boolean value indicating if the + connection should be made over SSL. + "uri": "str", # Optional. A connection string in the format + accepted by the ``psql`` command. This is provided as a convenience and + should be able to be constructed by the other attributes. + "user": "str" # Optional. The default user for the database. + }, + "private_network_uuid": "str", # Optional. A string specifying the + UUID of the VPC to which the database cluster will be assigned. If excluded, + the cluster when creating a new database cluster, it will be assigned to your + account's default VPC for the region. + "project_id": "str", # Optional. The ID of the project that the + database cluster is assigned to. If excluded when creating a new database + cluster, it will be assigned to your default project. + "region": "str", # The slug identifier for the region where the + database cluster is located. Required. + "rules": [ + { + "cluster_uuid": "str", # Optional. A unique ID for + the database cluster to which the rule is applied. + "created_at": "2020-02-20 00:00:00", # Optional. A + time value given in ISO8601 combined date and time format that + represents when the firewall rule was created. 
+ "type": "str", # The type of resource that the + firewall rule allows to access the database cluster. Required. Known + values are: "droplet", "k8s", "ip_addr", "tag", and "app". + "uuid": "str", # Optional. A unique ID for the + firewall rule itself. + "value": "str" # The ID of the specific resource, + the name of a tag applied to a group of resources, or the IP address + that the firewall rule allows to access the database cluster. + Required. + } + ], + "semantic_version": "str", # Optional. A string representing the + semantic version of the database engine in use for the cluster. + "size": "str", # The slug identifier representing the size of the + nodes in the database cluster. Required. + "status": "str", # Optional. A string representing the current + status of the database cluster. Known values are: "creating", "online", + "resizing", "migrating", and "forking". + "tags": [ + "str" # Optional. An array of tags that have been applied to + the database cluster. + ], + "users": [ + { + "access_cert": "str", # Optional. Access certificate + for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS + client authentication. (Kafka only). + "mysql_settings": { + "auth_plugin": "str" # A string specifying + the authentication method to be used for connections"nto the + MySQL user account. The valid values are + ``mysql_native_password``"nor ``caching_sha2_password``. If + excluded when creating a new user, the"ndefault for the version + of MySQL in use will be used. As of MySQL 8.0, the"ndefault is + ``caching_sha2_password``. Required. Known values are: + "mysql_native_password" and "caching_sha2_password". + }, + "name": "str", # The name of a database user. + Required. + "password": "str", # Optional. A randomly generated + password for the database user. + "role": "str", # Optional. A string representing the + database user's role. The value will be either"n"primary" or + "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier + for the ACL. Required. + "permission": "str", # + Permission set applied to the ACL. 'consume' allows for + messages to be consumed from the topic. 'produce' allows + for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' + permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, + update). Required. Known values are: "admin", "consume", + "produce", and "produceconsume". + "topic": "str" # A regex for + matching the topic(s) that this ACL should apply to. + Required. + } + ] + } } ], "version": "str", # Optional. A string representing the version of @@ -69663,8 +69826,8 @@ async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Redis, and "mongodb" for MongoDB. Required. Known values are: - "pg", "mysql", "redis", and "mongodb". + "redis" for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. + Known values are: "pg", "mysql", "redis", "mongodb", and "kafka". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -69739,6 +69902,10 @@ async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: ], "users": [ { + "access_cert": "str", # Optional. 
Access certificate + for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS + client authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the @@ -69753,9 +69920,29 @@ async def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier + for the ACL. Required. + "permission": "str", # + Permission set applied to the ACL. 'consume' allows for + messages to be consumed from the topic. 'produce' allows + for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' + permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, + update). Required. Known values are: "admin", "consume", + "produce", and "produceconsume". + "topic": "str" # A regex for + matching the topic(s) that this ACL should apply to. + Required. + } + ] + } } ], "version": "str", # Optional. A string representing the version of @@ -73014,6 +73201,10 @@ async def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: response == { "users": [ { + "access_cert": "str", # Optional. Access certificate for TLS + client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -73026,9 +73217,28 @@ async def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database - user's role. The value will be either"n"primary" or "normal". Known - values are: "primary" and "normal". + "role": "str", # Optional. A string representing the + database user's role. The value will be either"n"primary" or "normal". + Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the + ACL. Required. + "permission": "str", # Permission + set applied to the ACL. 'consume' allows for messages to be + consumed from the topic. 'produce' allows for messages to be + published to the topic. 'produceconsume' allows for both + 'consume' and 'produce' permission. 'admin' allows for + 'produceconsume' as well as any operations to administer the + topic (delete, update). Required. Known values are: "admin", + "consume", "produce", and "produceconsume". + "topic": "str" # A regex for + matching the topic(s) that this ACL should apply to. + Required. + } + ] + } } ] } @@ -73134,6 +73344,9 @@ async def add_user( When adding a user to a MySQL cluster, additional options can be configured in the ``mysql_settings`` object. + When adding a user to a Kafka cluster, additional options can be configured in + the ``settings`` object. + The response will be a JSON object with a key called ``user``. 
The value of this will be an object that contains the standard attributes associated with a database user including its randomly generated password. @@ -73154,6 +73367,10 @@ async def add_user( # JSON input template you can fill out and use as your body input. body = { + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client authentication. + (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user account. The valid values are @@ -73168,14 +73385,34 @@ async def add_user( "readonly": bool, # Optional. For MongoDB clusters, set to ``true`` to create a read-only user."nThis option is not currently supported for other database engines. - "role": "str" # Optional. A string representing the database user's role. + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the ACL. Required. + "permission": "str", # Permission set applied to the + ACL. 'consume' allows for messages to be consumed from the topic. + 'produce' allows for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' permission. + 'admin' allows for 'produceconsume' as well as any operations to + administer the topic (delete, update). Required. Known values are: + "admin", "consume", "produce", and "produceconsume". + "topic": "str" # A regex for matching the topic(s) + that this ACL should apply to. Required. + } + ] + } } # response body for status code(s): 201 response == { "user": { + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -73188,9 +73425,27 @@ async def add_user( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database user's + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the ACL. + Required. + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str" # A regex for matching the + topic(s) that this ACL should apply to. Required. + } + ] + } } } # response body for status code(s): 404 @@ -73225,6 +73480,9 @@ async def add_user( When adding a user to a MySQL cluster, additional options can be configured in the ``mysql_settings`` object. + When adding a user to a Kafka cluster, additional options can be configured in + the ``settings`` object. 
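# --- Editor's sketch (not part of the generated diff) ---------------------
# A minimal, hypothetical example of adding a Kafka user with topic ACLs via
# the async client described above. It assumes a DIGITALOCEAN_TOKEN
# environment variable and an existing Kafka cluster; the cluster UUID, user
# name, and topic pattern are placeholders, and the request body simply
# follows the JSON template shown in this docstring.
import asyncio
import os

from pydo.aio import Client


async def main() -> None:
    client = Client(token=os.environ["DIGITALOCEAN_TOKEN"])
    body = {
        "name": "kafka-consumer",  # required: the database user's name
        "settings": {
            "acl": [
                {
                    # The template above also lists an "id" field; it is
                    # omitted here on the assumption that the service assigns
                    # identifiers to new ACL entries.
                    "permission": "consume",
                    "topic": "customer-events.*",
                }
            ]
        },
    }
    resp = await client.databases.add_user(
        database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
        body=body,
    )
    # Per the 201 example, Kafka users also carry access_cert/access_key for
    # TLS client authentication.
    print(resp["user"]["name"], resp["user"].get("access_key"))
    await client.close()


if __name__ == "__main__":
    asyncio.run(main())
# ---------------------------------------------------------------------------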
+ The response will be a JSON object with a key called ``user``. The value of this will be an object that contains the standard attributes associated with a database user including its randomly generated password. @@ -73246,6 +73504,10 @@ async def add_user( # response body for status code(s): 201 response == { "user": { + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -73258,9 +73520,27 @@ async def add_user( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database user's + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the ACL. + Required. + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str" # A regex for matching the + topic(s) that this ACL should apply to. Required. + } + ] + } } } # response body for status code(s): 404 @@ -73290,6 +73570,9 @@ async def add_user( When adding a user to a MySQL cluster, additional options can be configured in the ``mysql_settings`` object. + When adding a user to a Kafka cluster, additional options can be configured in + the ``settings`` object. + The response will be a JSON object with a key called ``user``. The value of this will be an object that contains the standard attributes associated with a database user including its randomly generated password. @@ -73311,6 +73594,10 @@ async def add_user( # response body for status code(s): 201 response == { "user": { + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -73323,9 +73610,27 @@ async def add_user( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database user's + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the ACL. + Required. + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). 
+ Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str" # A regex for matching the + topic(s) that this ACL should apply to. Required. + } + ] + } } } # response body for status code(s): 404 @@ -73439,9 +73744,11 @@ async def get_user( The response will be a JSON object with a ``user`` key. This will be set to an object containing the standard database user attributes. - For MySQL clusters, additional options will be contained in the mysql_settings + For MySQL clusters, additional options will be contained in the ``mysql_settings`` object. + For Kafka clusters, additional options will be contained in the ``settings`` object. + :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str :param username: The name of the database user. Required. @@ -73456,6 +73763,10 @@ async def get_user( # response body for status code(s): 200 response == { "user": { + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -73468,9 +73779,27 @@ async def get_user( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database user's + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the ACL. + Required. + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str" # A regex for matching the + topic(s) that this ACL should apply to. Required. + } + ] + } } } # response body for status code(s): 404 @@ -73715,6 +74044,10 @@ async def reset_auth( # response body for status code(s): 200 response == { "user": { + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -73727,9 +74060,27 @@ async def reset_auth( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database user's + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the ACL. + Required. + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 
'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str" # A regex for matching the + topic(s) that this ACL should apply to. Required. + } + ] + } } } # response body for status code(s): 404 @@ -73786,6 +74137,10 @@ async def reset_auth( # response body for status code(s): 200 response == { "user": { + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -73798,9 +74153,27 @@ async def reset_auth( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database user's + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the ACL. + Required. + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str" # A regex for matching the + topic(s) that this ACL should apply to. Required. + } + ] + } } } # response body for status code(s): 404 @@ -73855,6 +74228,10 @@ async def reset_auth( # response body for status code(s): 200 response == { "user": { + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -73867,362 +74244,380 @@ async def reset_auth( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database user's + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. 
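The ``settings.acl`` block above is how topic-level permissions are attached to a Kafka database user. A minimal async-client sketch of creating such a user and then reading back its TLS credentials; the token environment variable, cluster UUID, user name, and topic regex are placeholders, and the ACL ``id`` from the response schema is omitted on the assumption that the service assigns it.

.. code-block:: python

    import asyncio
    import os

    from pydo.aio import Client


    async def main() -> None:
        client = Client(token=os.environ["DIGITALOCEAN_TOKEN"])
        cluster_uuid = "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30"  # placeholder

        # Kafka only: grant the new user read/write access to topics matching "events.*".
        created = await client.databases.add_user(
            cluster_uuid,
            {
                "name": "events-writer",
                "settings": {
                    "acl": [
                        {"permission": "produceconsume", "topic": "events.*"},
                    ]
                },
            },
        )
        print(created["user"]["name"])

        # Kafka users also carry TLS client credentials in the response.
        fetched = await client.databases.get_user(cluster_uuid, "events-writer")
        print(fetched["user"].get("access_cert"))


    asyncio.run(main())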
- } - """ - error_map = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 401: lambda response: ClientAuthenticationError(response=response), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) # type: Optional[str] - cls = kwargs.pop("cls", None) # type: ClsType[JSON] - - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IO, bytes)): - _content = body - else: - _json = body - - request = build_databases_reset_auth_request( - database_cluster_uuid=database_cluster_uuid, - username=username, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - request.url = self._client.format_url(request.url) # type: ignore - - pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - request, stream=False, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - map_error( - status_code=response.status_code, response=response, error_map=error_map - ) - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) - - return cast(JSON, deserialized) - - @distributed_trace_async - async def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: - """List All Databases. - - To list all of the databases in a clusters, send a GET request to - ``/v2/databases/$DATABASE_ID/dbs``. - - The result will be a JSON object with a ``dbs`` key. This will be set to an array - of database objects, each of which will contain the standard database attributes. - - Note: Database management is not supported for Redis clusters. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 200 - response == { - "dbs": [ - { - "name": "str" # The name of the database. Required. + "settings": { + "acl": [ + { + "id": "str", # An identifier for the ACL. + Required. + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 
'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str" # A regex for matching the + topic(s) that this ACL should apply to. Required. + } + ] } - ] - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - error_map = { - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 401: lambda response: ClientAuthenticationError(response=response), - 429: HttpResponseError, - 500: HttpResponseError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls = kwargs.pop("cls", None) # type: ClsType[JSON] - - request = build_databases_list_request( - database_cluster_uuid=database_cluster_uuid, - headers=_headers, - params=_params, - ) - request.url = self._client.format_url(request.url) # type: ignore - - pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - request, stream=False, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 404]: - map_error( - status_code=response.status_code, response=response, error_map=error_map - ) - raise HttpResponseError(response=response) - - response_headers = {} - if response.status_code == 200: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if response.status_code == 404: - response_headers["ratelimit-limit"] = self._deserialize( - "int", response.headers.get("ratelimit-limit") - ) - response_headers["ratelimit-remaining"] = self._deserialize( - "int", response.headers.get("ratelimit-remaining") - ) - response_headers["ratelimit-reset"] = self._deserialize( - "int", response.headers.get("ratelimit-reset") - ) - - if response.content: - deserialized = response.json() - else: - deserialized = None - - if cls: - return cls(pipeline_response, cast(JSON, deserialized), response_headers) - - return cast(JSON, deserialized) - - @overload - async def add( - self, - database_cluster_uuid: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: - """Add a New Database. - - To add a new database to an existing cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/dbs``. - - Note: Database management is not supported for Redis clusters. - - The response will be a JSON object with a key called ``db``. 
The value of this will be - an object that contains the standard attributes associated with a database. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "name": "str" # The name of the database. Required. - } - - # response body for status code(s): 201 - response == { - "db": { - "name": "str" # The name of the database. Required. - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - async def add( - self, - database_cluster_uuid: str, - body: IO, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> JSON: - """Add a New Database. - - To add a new database to an existing cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/dbs``. - - Note: Database management is not supported for Redis clusters. - - The response will be a JSON object with a key called ``db``. The value of this will be - an object that contains the standard attributes associated with a database. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: IO - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 201 - response == { - "db": { - "name": "str" # The name of the database. Required. - } - } - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @distributed_trace_async - async def add( - self, database_cluster_uuid: str, body: Union[JSON, IO], **kwargs: Any - ) -> JSON: - """Add a New Database. - - To add a new database to an existing cluster, send a POST request to - ``/v2/databases/$DATABASE_ID/dbs``. - - Note: Database management is not supported for Redis clusters. - - The response will be a JSON object with a key called ``db``. 
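``list`` and ``add`` together cover basic logical-database management on a cluster (not supported for Redis). A minimal async-client sketch; the cluster UUID and database name are placeholders.

.. code-block:: python

    import asyncio
    import os

    from pydo.aio import Client


    async def main() -> None:
        client = Client(token=os.environ["DIGITALOCEAN_TOKEN"])
        cluster_uuid = "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30"  # placeholder

        # Create a new logical database, then list everything in the cluster.
        created = await client.databases.add(cluster_uuid, {"name": "reporting"})
        print(created["db"]["name"])

        listing = await client.databases.list(cluster_uuid)
        for db in listing["dbs"]:
            print(db["name"])


    asyncio.run(main())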
The value of this will be - an object that contains the standard attributes associated with a database. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Is either a model type or a IO type. Required. - :type body: JSON or IO - :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. - Default value is None. - :paramtype content_type: str - :return: JSON object - :rtype: JSON - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # response body for status code(s): 201 - response == { - "db": { - "name": "str" # The name of the database. Required. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 401: lambda response: ClientAuthenticationError(response=response), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) # type: Optional[str] + cls = kwargs.pop("cls", None) # type: ClsType[JSON] + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IO, bytes)): + _content = body + else: + _json = body + + request = build_databases_reset_auth_request( + database_cluster_uuid=database_cluster_uuid, + username=username, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + map_error( + status_code=response.status_code, response=response, error_map=error_map + ) + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if 
response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) + + return cast(JSON, deserialized) + + @distributed_trace_async + async def list(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + """List All Databases. + + To list all of the databases in a clusters, send a GET request to + ``/v2/databases/$DATABASE_ID/dbs``. + + The result will be a JSON object with a ``dbs`` key. This will be set to an array + of database objects, each of which will contain the standard database attributes. + + Note: Database management is not supported for Redis clusters. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "dbs": [ + { + "name": "str" # The name of the database. Required. + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 401: lambda response: ClientAuthenticationError(response=response), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[JSON] + + request = build_databases_list_request( + database_cluster_uuid=database_cluster_uuid, + headers=_headers, + params=_params, + ) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + map_error( + status_code=response.status_code, response=response, error_map=error_map + ) + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = 
response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) + + return cast(JSON, deserialized) + + @overload + async def add( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + """Add a New Database. + + To add a new database to an existing cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/dbs``. + + Note: Database management is not supported for Redis clusters. + + The response will be a JSON object with a key called ``db``. The value of this will be + an object that contains the standard attributes associated with a database. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "name": "str" # The name of the database. Required. + } + + # response body for status code(s): 201 + response == { + "db": { + "name": "str" # The name of the database. Required. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def add( + self, + database_cluster_uuid: str, + body: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + """Add a New Database. + + To add a new database to an existing cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/dbs``. + + Note: Database management is not supported for Redis clusters. + + The response will be a JSON object with a key called ``db``. The value of this will be + an object that contains the standard attributes associated with a database. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "db": { + "name": "str" # The name of the database. Required. + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. 
+ "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def add( + self, database_cluster_uuid: str, body: Union[JSON, IO], **kwargs: Any + ) -> JSON: + """Add a New Database. + + To add a new database to an existing cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/dbs``. + + Note: Database management is not supported for Redis clusters. + + The response will be a JSON object with a key called ``db``. The value of this will be + an object that contains the standard attributes associated with a database. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a model type or a IO type. Required. + :type body: JSON or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "db": { + "name": "str" # The name of the database. Required. } } # response body for status code(s): 404 @@ -75568,14 +75963,868 @@ async def delete_connection_pool( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]] + + request = build_databases_delete_connection_pool_request( + database_cluster_uuid=database_cluster_uuid, + pool_name=pool_name, + headers=_headers, + params=_params, + ) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + map_error( + status_code=response.status_code, response=response, error_map=error_map + ) + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + @distributed_trace_async + async def get_eviction_policy( + self, database_cluster_uuid: str, **kwargs: Any + ) -> JSON: + """Retrieve the Eviction Policy for a Redis Cluster. 
+ + To retrieve the configured eviction policy for an existing Redis cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/eviction_policy``. + The response will be a JSON object with an ``eviction_policy`` key. This will be set to a + string representing the eviction policy. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "eviction_policy": "str" # A string specifying the desired eviction policy + for the Redis cluster."n"n"n* ``noeviction``"" : Don't evict any data, returns + error when memory limit is reached."n* ``allkeys_lru:`` Evict any key, least + recently used (LRU) first."n* ``allkeys_random``"" : Evict keys in a random + order."n* ``volatile_lru``"" : Evict keys with expiration only, least recently + used (LRU) first."n* ``volatile_random``"" : Evict keys with expiration only in a + random order."n* ``volatile_ttl``"" : Evict keys with expiration only, shortest + time-to-live (TTL) first. Required. Known values are: "noeviction", + "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and + "volatile_ttl". + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
+ } + """ + error_map = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 401: lambda response: ClientAuthenticationError(response=response), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[JSON] + + request = build_databases_get_eviction_policy_request( + database_cluster_uuid=database_cluster_uuid, + headers=_headers, + params=_params, + ) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + map_error( + status_code=response.status_code, response=response, error_map=error_map + ) + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) + + return cast(JSON, deserialized) + + @overload + async def update_eviction_policy( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: + """Configure the Eviction Policy for a Redis Cluster. + + To configure an eviction policy for an existing Redis cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. 
+ body = { + "eviction_policy": "str" # A string specifying the desired eviction policy + for the Redis cluster."n"n"n* ``noeviction``"" : Don't evict any data, returns + error when memory limit is reached."n* ``allkeys_lru:`` Evict any key, least + recently used (LRU) first."n* ``allkeys_random``"" : Evict keys in a random + order."n* ``volatile_lru``"" : Evict keys with expiration only, least recently + used (LRU) first."n* ``volatile_random``"" : Evict keys with expiration only in a + random order."n* ``volatile_ttl``"" : Evict keys with expiration only, shortest + time-to-live (TTL) first. Required. Known values are: "noeviction", + "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and + "volatile_ttl". + } + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def update_eviction_policy( + self, + database_cluster_uuid: str, + body: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: + """Configure the Eviction Policy for a Redis Cluster. + + To configure an eviction policy for an existing Redis cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def update_eviction_policy( + self, database_cluster_uuid: str, body: Union[JSON, IO], **kwargs: Any + ) -> Optional[JSON]: + """Configure the Eviction Policy for a Redis Cluster. + + To configure an eviction policy for an existing Redis cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a model type or a IO type. Required. + :type body: JSON or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. 
+ :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 401: lambda response: ClientAuthenticationError(response=response), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) # type: Optional[str] + cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]] + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IO, bytes)): + _content = body + else: + _json = body + + request = build_databases_update_eviction_policy_request( + database_cluster_uuid=database_cluster_uuid, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + map_error( + status_code=response.status_code, response=response, error_map=error_map + ) + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + @distributed_trace_async + async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + """Retrieve the SQL Modes for a MySQL Cluster. + + To retrieve the configured SQL modes for an existing MySQL cluster, send a GET request to + ``/v2/databases/$DATABASE_ID/sql_mode``. + The response will be a JSON object with a ``sql_mode`` key. This will be set to a string + representing the configured SQL modes. 
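The two eviction-policy operations pair naturally: read the current policy, then PUT a replacement (a successful update returns 204 with no body). A minimal async-client sketch against a Redis cluster; the UUID and chosen policy are placeholders.

.. code-block:: python

    import asyncio
    import os

    from pydo.aio import Client


    async def main() -> None:
        client = Client(token=os.environ["DIGITALOCEAN_TOKEN"])
        cluster_uuid = "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30"  # placeholder Redis cluster

        current = await client.databases.get_eviction_policy(cluster_uuid)
        print(current["eviction_policy"])

        # Switch to LRU eviction across all keys; success returns 204 with no body.
        await client.databases.update_eviction_policy(
            cluster_uuid, {"eviction_policy": "allkeys_lru"}
        )


    asyncio.run(main())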
+ + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "sql_mode": "str" # A string specifying the configured SQL modes for the + MySQL cluster. Required. + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 401: lambda response: ClientAuthenticationError(response=response), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[JSON] + + request = build_databases_get_sql_mode_request( + database_cluster_uuid=database_cluster_uuid, + headers=_headers, + params=_params, + ) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + map_error( + status_code=response.status_code, response=response, error_map=error_map + ) + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) + + return cast(JSON, deserialized) + + @overload + async def update_sql_mode( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: + """Update SQL Mode for a Cluster. + + To configure the SQL modes for an existing MySQL cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 + documentation for a `full list of supported SQL modes + `_. 
+ A successful request will receive a 204 No Content status code with no body in response. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "sql_mode": "str" # A string specifying the configured SQL modes for the + MySQL cluster. Required. + } + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def update_sql_mode( + self, + database_cluster_uuid: str, + body: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: + """Update SQL Mode for a Cluster. + + To configure the SQL modes for an existing MySQL cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 + documentation for a `full list of supported SQL modes + `_. + A successful request will receive a 204 No Content status code with no body in response. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def update_sql_mode( + self, database_cluster_uuid: str, body: Union[JSON, IO], **kwargs: Any + ) -> Optional[JSON]: + """Update SQL Mode for a Cluster. + + To configure the SQL modes for an existing MySQL cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 + documentation for a `full list of supported SQL modes + `_. + A successful request will receive a 204 No Content status code with no body in response. + + :param database_cluster_uuid: A unique identifier for a database cluster. 
Required. + :type database_cluster_uuid: str + :param body: Is either a model type or a IO type. Required. + :type body: JSON or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 401: lambda response: ClientAuthenticationError(response=response), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) # type: Optional[str] + cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]] + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IO, bytes)): + _content = body + else: + _json = body + + request = build_databases_update_sql_mode_request( + database_cluster_uuid=database_cluster_uuid, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + map_error( + status_code=response.status_code, response=response, error_map=error_map + ) + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + @overload + async def update_major_version( + self, + database_cluster_uuid: str, + body: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: + 
"""Upgrade Major Version for a Database. + + To upgrade the major version of a database, send a PUT request to + ``/v2/databases/$DATABASE_ID/upgrade``\ , specifying the target version. + A successful request will receive a 204 No Content status code with no body in response. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "version": "str" # Optional. A string representing the version of the + database engine in use for the cluster. + } + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + async def update_major_version( + self, + database_cluster_uuid: str, + body: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> Optional[JSON]: + """Upgrade Major Version for a Database. + + To upgrade the major version of a database, send a PUT request to + ``/v2/databases/$DATABASE_ID/upgrade``\ , specifying the target version. + A successful request will receive a 204 No Content status code with no body in response. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Required. + :type body: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace_async + async def update_major_version( + self, database_cluster_uuid: str, body: Union[JSON, IO], **kwargs: Any + ) -> Optional[JSON]: + """Upgrade Major Version for a Database. + + To upgrade the major version of a database, send a PUT request to + ``/v2/databases/$DATABASE_ID/upgrade``\ , specifying the target version. + A successful request will receive a 204 No Content status code with no body in response. 
+ + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a model type or a IO type. Required. + :type body: JSON or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 401: lambda response: ClientAuthenticationError(response=response), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} + content_type = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) # type: Optional[str] cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]] - request = build_databases_delete_connection_pool_request( + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IO, bytes)): + _content = body + else: + _json = body + + request = build_databases_update_major_version_request( database_cluster_uuid=database_cluster_uuid, - pool_name=pool_name, + content_type=content_type, + json=_json, + content=_content, headers=_headers, params=_params, ) @@ -75628,15 +76877,15 @@ async def delete_connection_pool( return deserialized @distributed_trace_async - async def get_eviction_policy( + async def list_kafka_topics( self, database_cluster_uuid: str, **kwargs: Any ) -> JSON: - """Retrieve the Eviction Policy for a Redis Cluster. + """List Topics for a Kafka Cluster. - To retrieve the configured eviction policy for an existing Redis cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/eviction_policy``. - The response will be a JSON object with an ``eviction_policy`` key. This will be set to a - string representing the eviction policy. + To list all of a Kafka cluster's topics, send a GET request to + ``/v2/databases/$DATABASE_ID/topics``. + + The result will be a JSON object with a ``topics`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
:type database_cluster_uuid: str @@ -75649,16 +76898,17 @@ async def get_eviction_policy( # response body for status code(s): 200 response == { - "eviction_policy": "str" # A string specifying the desired eviction policy - for the Redis cluster."n"n"n* ``noeviction``"" : Don't evict any data, returns - error when memory limit is reached."n* ``allkeys_lru:`` Evict any key, least - recently used (LRU) first."n* ``allkeys_random``"" : Evict keys in a random - order."n* ``volatile_lru``"" : Evict keys with expiration only, least recently - used (LRU) first."n* ``volatile_random``"" : Evict keys with expiration only in a - random order."n* ``volatile_ttl``"" : Evict keys with expiration only, shortest - time-to-live (TTL) first. Required. Known values are: "noeviction", - "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and - "volatile_ttl". + "topics": [ + { + "name": "str", # Optional. The name of the Kafka topic. + "partition_count": 0, # Optional. The number of partitions + available for the topic. On update, this value can only be increased. + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. + Known values are: "active", "configuring", "deleting", and "unknown". + } + ] } # response body for status code(s): 404 response == { @@ -75686,7 +76936,7 @@ async def get_eviction_policy( cls = kwargs.pop("cls", None) # type: ClsType[JSON] - request = build_databases_get_eviction_policy_request( + request = build_databases_list_kafka_topics_request( database_cluster_uuid=database_cluster_uuid, headers=_headers, params=_params, @@ -75744,28 +76994,30 @@ async def get_eviction_policy( return cast(JSON, deserialized) @overload - async def update_eviction_policy( + async def create_kafka_topic( self, database_cluster_uuid: str, - body: JSON, + body: Optional[JSON] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: - """Configure the Eviction Policy for a Redis Cluster. + ) -> JSON: + """Create Topic for a Kafka Cluster. - To configure an eviction policy for an existing Redis cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. + To create a topic attached to a Kafka cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/topics``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. + :param body: Default value is None. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -75773,18 +77025,231 @@ async def update_eviction_policy( # JSON input template you can fill out and use as your body input. 
body = { - "eviction_policy": "str" # A string specifying the desired eviction policy - for the Redis cluster."n"n"n* ``noeviction``"" : Don't evict any data, returns - error when memory limit is reached."n* ``allkeys_lru:`` Evict any key, least - recently used (LRU) first."n* ``allkeys_random``"" : Evict keys in a random - order."n* ``volatile_lru``"" : Evict keys with expiration only, least recently - used (LRU) first."n* ``volatile_random``"" : Evict keys with expiration only in a - random order."n* ``volatile_ttl``"" : Evict keys with expiration only, shortest - time-to-live (TTL) first. Required. Known values are: "noeviction", - "allkeys_lru", "allkeys_random", "volatile_lru", "volatile_random", and - "volatile_ttl". + "config": { + "cleanup_policy": "delete", # Optional. Default value is "delete". + The cleanup_policy sets the retention policy to use on log segments. 'delete' + will discard old segments when retention time/size limits are reached. + 'compact' will enable log compaction, resulting in retention of the latest + value for each key. Known values are: "delete", "compact", and + "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the topic. + Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value is + 86400000. The delete_retention_ms specifies how long (in ms) to retain delete + tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is 60000. + The file_delete_delay_ms specifies the time (in ms) to wait before deleting a + file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_messages specifies the number of messages to + accumulate on a log partition before messages are flushed to disk. + "flush_ms": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a + message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is 4096. The + index_interval_bytes specifies the number of bytes between entries being + added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum + amount of time (in ms) that a message will remain uncompacted. This is only + applicable if the logs are have compaction enabled. + "max_message_bytes": 1048588, # Optional. Default value is 1048588. + The max_messages_bytes specifies the largest record batch size (in bytes) + that can be sent to the server. This is calculated after compression if + compression is enabled. + "message_down_conversion_enable": True, # Optional. Default value is + True. The message_down_conversion_enable specifies whether down-conversion of + message formats is enabled to satisfy consumer requests. When 'false', the + broker will not perform conversion for consumers expecting older message + formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for + consume requests from these older clients. + "message_format_version": "3.0-IV1", # Optional. Default value is + "3.0-IV1". The message_format_version specifies the message format version + used by the broker to append messages to the logs. 
The value of this setting + is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By + setting a particular message format version, all existing messages on disk + must be smaller or equal to the specified version. Known values are: "0.8.0", + "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", + "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", + "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", + "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", + "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", + "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default value + is "create_time". The message_timestamp_type specifies whether to use the + message create time or log append time as the timestamp on a message. Known + values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. + The min_cleanable_dirty_ratio specifies the frequency of log compaction (if + enabled) in relation to duplicates present in the logs. For example, at 0.5, + at most 50% of the log could be duplicates before compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. The + min_compaction_lag_ms specifies the minimum time (in ms) that a message will + remain uncompacted in the log. Only relevant if log compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. The + min_insync_replicas specifies the number of replicas that must ACK a write + for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. The + preallocate specifies whether a file should be preallocated on disk when + creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is 604800000. + The retention_ms specifies the maximum amount of time (in ms) to keep a + message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is 209715200. + The segment_bytes specifies the maximum size of a single log file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is 604800000. The + segment_ms specifies the period of time after which the log will be forced to + roll if the segment file isn't full. This ensures that retention can delete + or compact old data. + "unclean_leader_election_enable": False # Optional. Default value is + False. Whether unclean_leader_election_enable specifies whether to allow + replicas that are not insync to be elected as leaders as a last resort. This + may result in data loss since those leaders are not insync. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partition_count": 0, # Optional. The number of partitions available for the + topic. On update, this value can only be increased. + "replication_factor": 0 # Optional. The number of nodes to replicate data + across the cluster. 
} + # response body for status code(s): 201 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". 
+ "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + "unclean_leader_election_enable": False # Optional. Default + value is False. Whether unclean_leader_election_enable specifies whether + to allow replicas that are not insync to be elected as leaders as a last + resort. This may result in data loss since those leaders are not insync. + }, + "name": "str", # The name of the Kafka topic. Required. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". 
+ } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -75799,33 +77264,163 @@ async def update_eviction_policy( """ @overload - async def update_eviction_policy( + async def create_kafka_topic( self, database_cluster_uuid: str, - body: IO, + body: Optional[IO] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: - """Configure the Eviction Policy for a Redis Cluster. + ) -> JSON: + """Create Topic for a Kafka Cluster. - To configure an eviction policy for an existing Redis cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. + To create a topic attached to a Kafka cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/topics``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. + :param body: Default value is None. :type body: IO :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # response body for status code(s): 201 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. 
This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. 
This ensures + that retention can delete or compact old data. + "unclean_leader_election_enable": False # Optional. Default + value is False. Whether unclean_leader_election_enable specifies whether + to allow replicas that are not insync to be elected as leaders as a last + resort. This may result in data loss since those leaders are not insync. + }, + "name": "str", # The name of the Kafka topic. Required. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -75840,28 +77435,161 @@ async def update_eviction_policy( """ @distributed_trace_async - async def update_eviction_policy( - self, database_cluster_uuid: str, body: Union[JSON, IO], **kwargs: Any - ) -> Optional[JSON]: - """Configure the Eviction Policy for a Redis Cluster. + async def create_kafka_topic( + self, + database_cluster_uuid: str, + body: Optional[Union[JSON, IO]] = None, + **kwargs: Any + ) -> JSON: + """Create Topic for a Kafka Cluster. - To configure an eviction policy for an existing Redis cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/eviction_policy`` specifying the desired policy. + To create a topic attached to a Kafka cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/topics``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a model type or a IO type. Required. + :param body: Is either a model type or a IO type. Default value is None. :type body: JSON or IO :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. Default value is None. :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # response body for status code(s): 201 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. 
The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. 
Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + "unclean_leader_election_enable": False # Optional. Default + value is False. Whether unclean_leader_election_enable specifies whether + to allow replicas that are not insync to be elected as leaders as a last + resort. This may result in data loss since those leaders are not insync. + }, + "name": "str", # The name of the Kafka topic. Required. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". 
+ } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -75889,7 +77617,7 @@ async def update_eviction_policy( content_type = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) # type: Optional[str] - cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]] + cls = kwargs.pop("cls", None) # type: ClsType[JSON] content_type = content_type or "application/json" _json = None @@ -75897,9 +77625,12 @@ async def update_eviction_policy( if isinstance(body, (IO, bytes)): _content = body else: - _json = body + if body is not None: + _json = body + else: + _json = None - request = build_databases_update_eviction_policy_request( + request = build_databases_create_kafka_topic_request( database_cluster_uuid=database_cluster_uuid, content_type=content_type, json=_json, @@ -75915,15 +77646,14 @@ async def update_eviction_policy( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [201, 404]: map_error( status_code=response.status_code, response=response, error_map=error_map ) raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 201: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -75934,6 +77664,11 @@ async def update_eviction_policy( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -75951,21 +77686,25 @@ async def update_eviction_policy( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) + return cls(pipeline_response, cast(JSON, deserialized), response_headers) - return deserialized + return cast(JSON, deserialized) @distributed_trace_async - async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: - """Retrieve the SQL Modes for a MySQL Cluster. + async def get_kafka_topic( + self, database_cluster_uuid: str, topic_name: str, **kwargs: Any + ) -> JSON: + """Get Topic for a Kafka Cluster. - To retrieve the configured SQL modes for an existing MySQL cluster, send a GET request to - ``/v2/databases/$DATABASE_ID/sql_mode``. - The response will be a JSON object with a ``sql_mode`` key. This will be set to a string - representing the configured SQL modes. + To retrieve a given topic by name from the set of a Kafka cluster's topics, + send a GET request to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str :return: JSON object :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: @@ -75975,8 +77714,131 @@ async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { - "sql_mode": "str" # A string specifying the configured SQL modes for the - MySQL cluster. Required. + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 
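A usage sketch for the ``list_kafka_topics`` and ``create_kafka_topic`` operations added above (a sketch only; it continues inside an ``async`` function with a ``pydo.aio.Client`` named ``client`` and a ``cluster_uuid`` already defined, as in the earlier sketch, and the topic name and settings are illustrative):

.. code-block:: python

    # Create a topic; the body mirrors the JSON input template above with
    # illustrative values.
    created = await client.databases.create_kafka_topic(
        database_cluster_uuid=cluster_uuid,
        body={
            "name": "customer-events",
            "partition_count": 3,
            "replication_factor": 2,
            "config": {"retention_ms": 86400000},
        },
    )
    print(created["topic"]["name"], created["topic"]["state"])

    # List every topic attached to the cluster.
    topics = await client.databases.list_kafka_topics(
        database_cluster_uuid=cluster_uuid,
    )
    for topic in topics["topics"]:
        print(topic["name"], topic["partition_count"])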
'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. 
Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + "unclean_leader_election_enable": False # Optional. Default + value is False. Whether unclean_leader_election_enable specifies whether + to allow replicas that are not insync to be elected as leaders as a last + resort. This may result in data loss since those leaders are not insync. + }, + "name": "str", # The name of the Kafka topic. Required. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". 
+ } } # response body for status code(s): 404 response == { @@ -76004,8 +77866,9 @@ async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: cls = kwargs.pop("cls", None) # type: ClsType[JSON] - request = build_databases_get_sql_mode_request( + request = build_databases_get_kafka_topic_request( database_cluster_uuid=database_cluster_uuid, + topic_name=topic_name, headers=_headers, params=_params, ) @@ -76062,31 +77925,33 @@ async def get_sql_mode(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: return cast(JSON, deserialized) @overload - async def update_sql_mode( + async def update_kafka_topic( self, database_cluster_uuid: str, - body: JSON, + topic_name: str, + body: Optional[JSON] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: - """Update SQL Mode for a Cluster. + ) -> JSON: + """Update Topic for a Kafka Cluster. - To configure the SQL modes for an existing MySQL cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 - documentation for a `full list of supported SQL modes - `_. - A successful request will receive a 204 No Content status code with no body in response. + To update a topic attached to a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str + :param body: Default value is None. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: @@ -76094,10 +77959,240 @@ async def update_sql_mode( # JSON input template you can fill out and use as your body input. body = { - "sql_mode": "str" # A string specifying the configured SQL modes for the - MySQL cluster. Required. + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. 
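Continuing the same sketch (``client`` and ``cluster_uuid`` as before), a single topic can be fetched by name with the new ``get_kafka_topic`` operation; the topic name below is a placeholder:

.. code-block:: python

    topic = await client.databases.get_kafka_topic(
        database_cluster_uuid=cluster_uuid,
        topic_name="customer-events",
    )
    # The response nests the details under a "topic" key.
    print(topic["topic"]["replication_factor"], topic["topic"]["state"])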
The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. 
+ "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + "unclean_leader_election_enable": False # Optional. Default + value is False. Whether unclean_leader_election_enable specifies whether + to allow replicas that are not insync to be elected as leaders as a last + resort. This may result in data loss since those leaders are not insync. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partition_count": 0, # Optional. The number of partitions available + for the topic. On update, this value can only be increased. + "replication_factor": 0 # Optional. The number of nodes to replicate + data across the cluster. + } } + # response body for status code(s): 200 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. 
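A matching sketch for ``update_kafka_topic`` (same assumed ``client`` and ``cluster_uuid``; the body follows the ``topic``-wrapped JSON input template shown above, and the values are illustrative):

.. code-block:: python

    updated = await client.databases.update_kafka_topic(
        database_cluster_uuid=cluster_uuid,
        topic_name="customer-events",
        body={
            "topic": {
                # Per the docstring above, partition_count can only be increased.
                "partition_count": 6,
                "config": {"retention_ms": 172800000},
            }
        },
    )
    print(updated["topic"]["partition_count"])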
+ "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. 
+ "unclean_leader_election_enable": False # Optional. Default + value is False. Whether unclean_leader_election_enable specifies whether + to allow replicas that are not insync to be elected as leaders as a last + resort. This may result in data loss since those leaders are not insync. + }, + "name": "str", # The name of the Kafka topic. Required. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -76112,36 +78207,166 @@ async def update_sql_mode( """ @overload - async def update_sql_mode( + async def update_kafka_topic( self, database_cluster_uuid: str, - body: IO, + topic_name: str, + body: Optional[IO] = None, *, content_type: str = "application/json", **kwargs: Any - ) -> Optional[JSON]: - """Update SQL Mode for a Cluster. + ) -> JSON: + """Update Topic for a Kafka Cluster. - To configure the SQL modes for an existing MySQL cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 - documentation for a `full list of supported SQL modes - `_. - A successful request will receive a 204 No Content status code with no body in response. + To update a topic attached to a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + The result will be a JSON object with a ``topic`` key. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Required. + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str + :param body: Default value is None. :type body: IO :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None + :return: JSON object + :rtype: JSON :raises ~azure.core.exceptions.HttpResponseError: Example: .. code-block:: python + # response body for status code(s): 200 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. 
Default value
+                          is 86400000. The delete_retention_ms specifies how long (in ms) to retain
+                          delete tombstone markers for topics.
+                        "file_delete_delay_ms": 60000, # Optional. Default value is
+                          60000. The file_delete_delay_ms specifies the time (in ms) to wait before
+                          deleting a file from the filesystem.
+                        "flush_messages": 9223372036854776000, # Optional. Default
+                          value is 9223372036854776000. The flush_messages specifies the number of
+                          messages to accumulate on a log partition before messages are flushed to
+                          disk.
+                        "flush_ms": 9223372036854776000, # Optional. Default value
+                          is 9223372036854776000. The flush_ms specifies the maximum time (in ms)
+                          that a message is kept in memory before being flushed to disk.
+                        "index_interval_bytes": 4096, # Optional. Default value is
+                          4096. The index_interval_bytes specifies the number of bytes between
+                          entries being added into the offset index.
+                        "max_compaction_lag_ms": 9223372036854776000, # Optional.
+                          Default value is 9223372036854776000. The max_compaction_lag_ms specifies
+                          the maximum amount of time (in ms) that a message will remain
+                          uncompacted. This is only applicable if the logs have compaction
+                          enabled.
+                        "max_message_bytes": 1048588, # Optional. Default value is
+                          1048588. The max_message_bytes specifies the largest record batch size
+                          (in bytes) that can be sent to the server. This is calculated after
+                          compression if compression is enabled.
+                        "message_down_conversion_enable": True, # Optional. Default
+                          value is True. The message_down_conversion_enable specifies whether
+                          down-conversion of message formats is enabled to satisfy consumer
+                          requests. When 'false', the broker will not perform conversion for
+                          consumers expecting older message formats. The broker will respond with
+                          an ``UNSUPPORTED_VERSION`` error for consume requests from these older
+                          clients.
+                        "message_format_version": "3.0-IV1", # Optional. Default
+                          value is "3.0-IV1". The message_format_version specifies the message
+                          format version used by the broker to append messages to the logs. The
+                          value of this setting is assumed to be 3.0-IV1 if the broker protocol
+                          version is 3.0 or higher. By setting a particular message format
+                          version, all existing messages on disk must be smaller or equal to the
+                          specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0",
+                          "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2",
+                          "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0",
+                          "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2",
+                          "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1",
+                          "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0",
+                          "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0",
+                          "3.3-IV1", "3.3-IV2", and "3.3-IV3".
+                        "message_timestamp_type": "create_time", # Optional. Default
+                          value is "create_time". The message_timestamp_type specifies whether to
+                          use the message create time or log append time as the timestamp on a
+                          message. Known values are: "create_time" and "log_append_time".
+                        "min_cleanable_dirty_ratio": 0.5, # Optional. Default value
+                          is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log
+                          compaction (if enabled) in relation to duplicates present in the logs.
+                          For example, at 0.5, at most 50% of the log could be duplicates before
+                          compaction would begin.
+                        "min_compaction_lag_ms": 0, # Optional. Default value is 0.
+                          The min_compaction_lag_ms specifies the minimum time (in ms) that a
+                          message will remain uncompacted in the log.
Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + "unclean_leader_election_enable": False # Optional. Default + value is False. Whether unclean_leader_election_enable specifies whether + to allow replicas that are not insync to be elected as leaders as a last + resort. This may result in data loss since those leaders are not insync. + }, + "name": "str", # The name of the Kafka topic. Required. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -76156,31 +78381,164 @@ async def update_sql_mode( """ @distributed_trace_async - async def update_sql_mode( - self, database_cluster_uuid: str, body: Union[JSON, IO], **kwargs: Any - ) -> Optional[JSON]: - """Update SQL Mode for a Cluster. + async def update_kafka_topic( + self, + database_cluster_uuid: str, + topic_name: str, + body: Optional[Union[JSON, IO]] = None, + **kwargs: Any + ) -> JSON: + """Update Topic for a Kafka Cluster. - To configure the SQL modes for an existing MySQL cluster, send a PUT request to - ``/v2/databases/$DATABASE_ID/sql_mode`` specifying the desired modes. See the official MySQL 8 - documentation for a `full list of supported SQL modes - `_. - A successful request will receive a 204 No Content status code with no body in response. + To update a topic attached to a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. 
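As a usage sketch only (not part of the generated code): the new async topic operations can be driven roughly as below. It assumes ``pydo.aio.Client`` is the async client entry point of this package and that ``DIGITALOCEAN_TOKEN`` holds an API token; the cluster UUID and topic name are placeholders.

.. code-block:: python

    import asyncio
    import os

    from pydo.aio import Client  # assumed async client entry point

    async def main() -> None:
        client = Client(token=os.environ["DIGITALOCEAN_TOKEN"])

        # PUT /v2/databases/$DATABASE_ID/topics/$TOPIC_NAME
        updated = await client.databases.update_kafka_topic(
            database_cluster_uuid="00000000-0000-0000-0000-000000000000",  # placeholder
            topic_name="customer-events",  # placeholder
            body={
                "partition_count": 6,  # may only be increased on update
                "config": {"retention_ms": 86400000},  # keep messages for one day
            },
        )
        print(updated["topic"]["state"])

        # DELETE /v2/databases/$DATABASE_ID/topics/$TOPIC_NAME returns 204 with no body
        await client.databases.delete_kafka_topic(
            database_cluster_uuid="00000000-0000-0000-0000-000000000000",  # placeholder
            topic_name="customer-events",
        )

    asyncio.run(main())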
+
+        The result will be a JSON object with a ``topic`` key.

         :param database_cluster_uuid: A unique identifier for a database cluster. Required.
         :type database_cluster_uuid: str
-        :param body: Is either a model type or a IO type. Required.
+        :param topic_name: The name used to identify the Kafka topic. Required.
+        :type topic_name: str
+        :param body: Is either a model type or an IO type. Default value is None.
         :type body: JSON or IO
         :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
         :paramtype content_type: str
-        :return: JSON object or None
-        :rtype: JSON or None
+        :return: JSON object
+        :rtype: JSON
         :raises ~azure.core.exceptions.HttpResponseError:

         Example:
             .. code-block:: python

+            # response body for status code(s): 200
+            response == {
+                "topic": {
+                    "config": {
+                        "cleanup_policy": "delete", # Optional. Default value is
+                          "delete". The cleanup_policy sets the retention policy to use on log
+                          segments. 'delete' will discard old segments when retention time/size
+                          limits are reached. 'compact' will enable log compaction, resulting in
+                          retention of the latest value for each key. Known values are: "delete",
+                          "compact", and "compact_delete".
+                        "compression_type": "producer", # Optional. Default value is
+                          "producer". The compression_type specifies the compression type of the
+                          topic. Known values are: "producer", "gzip", "snappy", "lz4", "zstd", and
+                          "uncompressed".
+                        "delete_retention_ms": 86400000, # Optional. Default value
+                          is 86400000. The delete_retention_ms specifies how long (in ms) to retain
+                          delete tombstone markers for topics.
+                        "file_delete_delay_ms": 60000, # Optional. Default value is
+                          60000. The file_delete_delay_ms specifies the time (in ms) to wait before
+                          deleting a file from the filesystem.
+                        "flush_messages": 9223372036854776000, # Optional. Default
+                          value is 9223372036854776000. The flush_messages specifies the number of
+                          messages to accumulate on a log partition before messages are flushed to
+                          disk.
+                        "flush_ms": 9223372036854776000, # Optional. Default value
+                          is 9223372036854776000. The flush_ms specifies the maximum time (in ms)
+                          that a message is kept in memory before being flushed to disk.
+                        "index_interval_bytes": 4096, # Optional. Default value is
+                          4096. The index_interval_bytes specifies the number of bytes between
+                          entries being added into the offset index.
+                        "max_compaction_lag_ms": 9223372036854776000, # Optional.
+                          Default value is 9223372036854776000. The max_compaction_lag_ms specifies
+                          the maximum amount of time (in ms) that a message will remain
+                          uncompacted. This is only applicable if the logs have compaction
+                          enabled.
+                        "max_message_bytes": 1048588, # Optional. Default value is
+                          1048588. The max_message_bytes specifies the largest record batch size
+                          (in bytes) that can be sent to the server. This is calculated after
+                          compression if compression is enabled.
+                        "message_down_conversion_enable": True, # Optional. Default
+                          value is True. The message_down_conversion_enable specifies whether
+                          down-conversion of message formats is enabled to satisfy consumer
+                          requests. When 'false', the broker will not perform conversion for
+                          consumers expecting older message formats. The broker will respond with
+                          an ``UNSUPPORTED_VERSION`` error for consume requests from these older
+                          clients.
+                        "message_format_version": "3.0-IV1", # Optional. Default
+                          value is "3.0-IV1". The message_format_version specifies the message
+                          format version used by the broker to append messages to the logs.
The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + "unclean_leader_election_enable": False # Optional. Default + value is False. Whether unclean_leader_election_enable specifies whether + to allow replicas that are not insync to be elected as leaders as a last + resort. This may result in data loss since those leaders are not insync. + }, + "name": "str", # The name of the Kafka topic. Required. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. 
An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } # response body for status code(s): 404 response == { "id": "str", # A short identifier corresponding to the HTTP status code @@ -76208,7 +78566,7 @@ async def update_sql_mode( content_type = kwargs.pop( "content_type", _headers.pop("Content-Type", None) ) # type: Optional[str] - cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]] + cls = kwargs.pop("cls", None) # type: ClsType[JSON] content_type = content_type or "application/json" _json = None @@ -76216,10 +78574,14 @@ async def update_sql_mode( if isinstance(body, (IO, bytes)): _content = body else: - _json = body + if body is not None: + _json = body + else: + _json = None - request = build_databases_update_sql_mode_request( + request = build_databases_update_kafka_topic_request( database_cluster_uuid=database_cluster_uuid, + topic_name=topic_name, content_type=content_type, json=_json, content=_content, @@ -76234,15 +78596,14 @@ async def update_sql_mode( response = pipeline_response.http_response - if response.status_code not in [204, 404]: + if response.status_code not in [200, 404]: map_error( status_code=response.status_code, response=response, error_map=error_map ) raise HttpResponseError(response=response) - deserialized = None response_headers = {} - if response.status_code == 204: + if response.status_code == 200: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") ) @@ -76253,6 +78614,11 @@ async def update_sql_mode( "int", response.headers.get("ratelimit-reset") ) + if response.content: + deserialized = response.json() + else: + deserialized = None + if response.status_code == 404: response_headers["ratelimit-limit"] = self._deserialize( "int", response.headers.get("ratelimit-limit") @@ -76270,117 +78636,26 @@ async def update_sql_mode( deserialized = None if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - - @overload - async def update_major_version( - self, - database_cluster_uuid: str, - body: JSON, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> Optional[JSON]: - """Upgrade Major Version for a Database. - - To upgrade the major version of a database, send a PUT request to - ``/v2/databases/$DATABASE_ID/upgrade``\ , specifying the target version. - A successful request will receive a 204 No Content status code with no body in response. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python - - # JSON input template you can fill out and use as your body input. - body = { - "version": "str" # Optional. A string representing the version of the - database engine in use for the cluster. 
- } - - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ - - @overload - async def update_major_version( - self, - database_cluster_uuid: str, - body: IO, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> Optional[JSON]: - """Upgrade Major Version for a Database. - - To upgrade the major version of a database, send a PUT request to - ``/v2/databases/$DATABASE_ID/upgrade``\ , specifying the target version. - A successful request will receive a 204 No Content status code with no body in response. - - :param database_cluster_uuid: A unique identifier for a database cluster. Required. - :type database_cluster_uuid: str - :param body: Required. - :type body: IO - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: JSON object or None - :rtype: JSON or None - :raises ~azure.core.exceptions.HttpResponseError: - - Example: - .. code-block:: python + return cls(pipeline_response, cast(JSON, deserialized), response_headers) - # response body for status code(s): 404 - response == { - "id": "str", # A short identifier corresponding to the HTTP status code - returned. For example, the ID for a response returning a 404 status code would - be "not_found.". Required. - "message": "str", # A message providing additional information about the - error, including details to help resolve it when possible. Required. - "request_id": "str" # Optional. Optionally, some endpoints may include a - request ID that should be provided when reporting bugs or opening support - tickets to help identify the issue. - } - """ + return cast(JSON, deserialized) @distributed_trace_async - async def update_major_version( - self, database_cluster_uuid: str, body: Union[JSON, IO], **kwargs: Any + async def delete_kafka_topic( + self, database_cluster_uuid: str, topic_name: str, **kwargs: Any ) -> Optional[JSON]: - """Upgrade Major Version for a Database. + """Delete Topic for a Kafka Cluster. - To upgrade the major version of a database, send a PUT request to - ``/v2/databases/$DATABASE_ID/upgrade``\ , specifying the target version. - A successful request will receive a 204 No Content status code with no body in response. + To delete a single topic within a Kafka cluster, send a DELETE request + to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + A status of 204 will be given. This indicates that the request was + processed successfully, but that no response body is needed. :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str - :param body: Is either a model type or a IO type. Required. - :type body: JSON or IO - :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. - Default value is None. - :paramtype content_type: str + :param topic_name: The name used to identify the Kafka topic. Required. 
+ :type topic_name: str :return: JSON object or None :rtype: JSON or None :raises ~azure.core.exceptions.HttpResponseError: @@ -76409,27 +78684,14 @@ async def update_major_version( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type = kwargs.pop( - "content_type", _headers.pop("Content-Type", None) - ) # type: Optional[str] cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]] - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(body, (IO, bytes)): - _content = body - else: - _json = body - - request = build_databases_update_major_version_request( + request = build_databases_delete_kafka_topic_request( database_cluster_uuid=database_cluster_uuid, - content_type=content_type, - json=_json, - content=_content, + topic_name=topic_name, headers=_headers, params=_params, ) diff --git a/src/pydo/operations/_operations.py b/src/pydo/operations/_operations.py index bea0c691..1ddf3015 100644 --- a/src/pydo/operations/_operations.py +++ b/src/pydo/operations/_operations.py @@ -2368,6 +2368,138 @@ def build_databases_update_major_version_request( return HttpRequest(method="PUT", url=_url, headers=_headers, **kwargs) +def build_databases_list_kafka_topics_request( + database_cluster_uuid: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/databases/{database_cluster_uuid}/topics" + path_format_arguments = { + "database_cluster_uuid": _SERIALIZER.url( + "database_cluster_uuid", database_cluster_uuid, "str" + ), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + +def build_databases_create_kafka_topic_request( + database_cluster_uuid: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) # type: Optional[str] + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/databases/{database_cluster_uuid}/topics" + path_format_arguments = { + "database_cluster_uuid": _SERIALIZER.url( + "database_cluster_uuid", database_cluster_uuid, "str" + ), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header( + "content_type", content_type, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, headers=_headers, **kwargs) + + +def build_databases_get_kafka_topic_request( + database_cluster_uuid: str, topic_name: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/databases/{database_cluster_uuid}/topics/{topic_name}" + path_format_arguments = { + "database_cluster_uuid": _SERIALIZER.url( + "database_cluster_uuid", database_cluster_uuid, "str" + ), + "topic_name": _SERIALIZER.url("topic_name", topic_name, "str"), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # 
Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, headers=_headers, **kwargs) + + +def build_databases_update_kafka_topic_request( + database_cluster_uuid: str, topic_name: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + content_type = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) # type: Optional[str] + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/databases/{database_cluster_uuid}/topics/{topic_name}" + path_format_arguments = { + "database_cluster_uuid": _SERIALIZER.url( + "database_cluster_uuid", database_cluster_uuid, "str" + ), + "topic_name": _SERIALIZER.url("topic_name", topic_name, "str"), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header( + "content_type", content_type, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, headers=_headers, **kwargs) + + +def build_databases_delete_kafka_topic_request( + database_cluster_uuid: str, topic_name: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/v2/databases/{database_cluster_uuid}/topics/{topic_name}" + path_format_arguments = { + "database_cluster_uuid": _SERIALIZER.url( + "database_cluster_uuid", database_cluster_uuid, "str" + ), + "topic_name": _SERIALIZER.url("topic_name", topic_name, "str"), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, headers=_headers, **kwargs) + + def build_domains_list_request( *, per_page: int = 20, page: int = 1, **kwargs: Any ) -> HttpRequest: @@ -75027,6 +75159,30 @@ def list_options(self, **kwargs: Any) -> JSON: # response body for status code(s): 200 response == { "options": { + "kafka": { + "layouts": [ + { + "num_nodes": 0, # Optional. An array of + objects, each indicating the node sizes (otherwise referred to as + slugs) that are available with various numbers of nodes in the + database cluster. Each slugs denotes the node's identifier, CPU, + and RAM (in that order). + "sizes": [ + "str" # Optional. An array of + objects containing the slugs available with various node + counts. + ] + } + ], + "regions": [ + "str" # Optional. An array of strings containing the + names of available regions. + ], + "versions": [ + "str" # Optional. An array of strings containing the + names of available regions. + ] + }, "mongodb": { "layouts": [ { @@ -75125,6 +75281,19 @@ def list_options(self, **kwargs: Any) -> JSON: } }, "version_availability": { + "kafka": [ + { + "end_of_availability": "str", # Optional. A + timestamp referring to the date when the particular version will no + longer be available for creating new clusters. If null, the version + does not have an end of availability timeline. + "end_of_life": "str", # Optional. A timestamp + referring to the date when the particular version will no longer be + supported. If null, the version does not have an end of life + timeline. + "version": "str" # Optional. The engine version. + } + ], "mongodb": [ { "end_of_availability": "str", # Optional. 
A @@ -75317,8 +75486,9 @@ def list_clusters(self, *, tag_name: Optional[str] = None, **kwargs: Any) -> JSO ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, - "mysql" for MySQL, "redis" for Redis, and "mongodb" for MongoDB. - Required. Known values are: "pg", "mysql", "redis", and "mongodb". + "mysql" for MySQL, "redis" for Redis, "mongodb" for MongoDB, and "kafka" + for Kafka. Required. Known values are: "pg", "mysql", "redis", "mongodb", + and "kafka". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -75397,6 +75567,10 @@ def list_clusters(self, *, tag_name: Optional[str] = None, **kwargs: Any) -> JSO ], "users": [ { + "access_cert": "str", # Optional. Access + certificate for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key + for TLS client authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for @@ -75412,10 +75586,31 @@ def list_clusters(self, *, tag_name: Optional[str] = None, **kwargs: Any) -> JSO user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An + identifier for the ACL. Required. + "permission": "str", + # Permission set applied to the ACL. 'consume' allows + for messages to be consumed from the topic. 'produce' + allows for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for + 'produceconsume' as well as any operations to + administer the topic (delete, update). Required. + Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str" # A + regex for matching the topic(s) that this ACL should + apply to. Required. + } + ] + } } ], "version": "str", # Optional. A string representing the @@ -75579,8 +75774,8 @@ def create_cluster( ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, "redis" - for Redis, and "mongodb" for MongoDB. Required. Known values are: "pg", "mysql", - "redis", and "mongodb". + for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. Known values + are: "pg", "mysql", "redis", "mongodb", and "kafka". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -75652,6 +75847,10 @@ def create_cluster( ], "users": [ { + "access_cert": "str", # Optional. Access certificate for TLS + client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -75664,9 +75863,28 @@ def create_cluster( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database - user's role. The value will be either"n"primary" or "normal". Known - values are: "primary" and "normal". + "role": "str", # Optional. 
A string representing the + database user's role. The value will be either"n"primary" or "normal". + Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the + ACL. Required. + "permission": "str", # Permission + set applied to the ACL. 'consume' allows for messages to be + consumed from the topic. 'produce' allows for messages to be + published to the topic. 'produceconsume' allows for both + 'consume' and 'produce' permission. 'admin' allows for + 'produceconsume' as well as any operations to administer the + topic (delete, update). Required. Known values are: "admin", + "consume", "produce", and "produceconsume". + "topic": "str" # A regex for + matching the topic(s) that this ACL should apply to. + Required. + } + ] + } } ], "version": "str", # Optional. A string representing the version of the @@ -75707,8 +75925,8 @@ def create_cluster( ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Redis, and "mongodb" for MongoDB. Required. Known values are: - "pg", "mysql", "redis", and "mongodb". + "redis" for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. + Known values are: "pg", "mysql", "redis", "mongodb", and "kafka". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -75783,6 +76001,10 @@ def create_cluster( ], "users": [ { + "access_cert": "str", # Optional. Access certificate + for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS + client authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the @@ -75797,9 +76019,29 @@ def create_cluster( Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier + for the ACL. Required. + "permission": "str", # + Permission set applied to the ACL. 'consume' allows for + messages to be consumed from the topic. 'produce' allows + for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' + permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, + update). Required. Known values are: "admin", "consume", + "produce", and "produceconsume". + "topic": "str" # A regex for + matching the topic(s) that this ACL should apply to. + Required. + } + ] + } } ], "version": "str", # Optional. A string representing the version of @@ -75887,8 +76129,8 @@ def create_cluster( ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Redis, and "mongodb" for MongoDB. Required. Known values are: - "pg", "mysql", "redis", and "mongodb". + "redis" for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. + Known values are: "pg", "mysql", "redis", "mongodb", and "kafka". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. 
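With ``"kafka"`` now accepted as an ``engine`` slug, a minimal cluster-creation sketch with the synchronous client could look like the following. The region, size slug, and version are illustrative placeholders rather than values taken from this change, and the ``database`` key lookup follows the usual DigitalOcean API response shape.

.. code-block:: python

    import os

    from pydo import Client  # synchronous client

    client = Client(token=os.environ["DIGITALOCEAN_TOKEN"])

    # POST /v2/databases with the new "kafka" engine slug
    created = client.databases.create_cluster(
        body={
            "name": "example-kafka-cluster",  # placeholder name
            "engine": "kafka",
            "num_nodes": 3,            # Kafka clusters run on multiple nodes
            "region": "nyc3",          # illustrative region slug
            "size": "db-s-2vcpu-2gb",  # illustrative size slug
            "version": "3.5",          # illustrative Kafka version
        }
    )

    cluster = created.get("database", created)  # DO's API nests the cluster under "database"
    print(cluster["id"], cluster["status"])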
"maintenance_window": { @@ -75963,6 +76205,10 @@ def create_cluster( ], "users": [ { + "access_cert": "str", # Optional. Access certificate + for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS + client authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the @@ -75977,9 +76223,29 @@ def create_cluster( Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier + for the ACL. Required. + "permission": "str", # + Permission set applied to the ACL. 'consume' allows for + messages to be consumed from the topic. 'produce' allows + for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' + permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, + update). Required. Known values are: "admin", "consume", + "produce", and "produceconsume". + "topic": "str" # A regex for + matching the topic(s) that this ACL should apply to. + Required. + } + ] + } } ], "version": "str", # Optional. A string representing the version of @@ -76065,8 +76331,8 @@ def create_cluster(self, body: Union[JSON, IO], **kwargs: Any) -> JSON: ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Redis, and "mongodb" for MongoDB. Required. Known values are: - "pg", "mysql", "redis", and "mongodb". + "redis" for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. + Known values are: "pg", "mysql", "redis", "mongodb", and "kafka". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -76141,6 +76407,10 @@ def create_cluster(self, body: Union[JSON, IO], **kwargs: Any) -> JSON: ], "users": [ { + "access_cert": "str", # Optional. Access certificate + for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS + client authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the @@ -76155,9 +76425,29 @@ def create_cluster(self, body: Union[JSON, IO], **kwargs: Any) -> JSON: Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier + for the ACL. Required. + "permission": "str", # + Permission set applied to the ACL. 'consume' allows for + messages to be consumed from the topic. 'produce' allows + for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' + permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, + update). Required. Known values are: "admin", "consume", + "produce", and "produceconsume". 
+ "topic": "str" # A regex for + matching the topic(s) that this ACL should apply to. + Required. + } + ] + } } ], "version": "str", # Optional. A string representing the version of @@ -76317,8 +76607,8 @@ def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: ], "engine": "str", # A slug representing the database engine used for the cluster. The possible values are: "pg" for PostgreSQL, "mysql" for MySQL, - "redis" for Redis, and "mongodb" for MongoDB. Required. Known values are: - "pg", "mysql", "redis", and "mongodb". + "redis" for Redis, "mongodb" for MongoDB, and "kafka" for Kafka. Required. + Known values are: "pg", "mysql", "redis", "mongodb", and "kafka". "id": "str", # Optional. A unique ID that can be used to identify and reference a database cluster. "maintenance_window": { @@ -76393,6 +76683,10 @@ def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: ], "users": [ { + "access_cert": "str", # Optional. Access certificate + for TLS client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS + client authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the @@ -76407,9 +76701,29 @@ def get_cluster(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier + for the ACL. Required. + "permission": "str", # + Permission set applied to the ACL. 'consume' allows for + messages to be consumed from the topic. 'produce' allows + for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' + permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, + update). Required. Known values are: "admin", "consume", + "produce", and "produceconsume". + "topic": "str" # A regex for + matching the topic(s) that this ACL should apply to. + Required. + } + ] + } } ], "version": "str", # Optional. A string representing the version of @@ -79664,6 +79978,10 @@ def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: response == { "users": [ { + "access_cert": "str", # Optional. Access certificate for TLS + client authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -79676,9 +79994,28 @@ def list_users(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database - user's role. The value will be either"n"primary" or "normal". Known - values are: "primary" and "normal". + "role": "str", # Optional. A string representing the + database user's role. The value will be either"n"primary" or "normal". + Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the + ACL. Required. 
+ "permission": "str", # Permission + set applied to the ACL. 'consume' allows for messages to be + consumed from the topic. 'produce' allows for messages to be + published to the topic. 'produceconsume' allows for both + 'consume' and 'produce' permission. 'admin' allows for + 'produceconsume' as well as any operations to administer the + topic (delete, update). Required. Known values are: "admin", + "consume", "produce", and "produceconsume". + "topic": "str" # A regex for + matching the topic(s) that this ACL should apply to. + Required. + } + ] + } } ] } @@ -79784,6 +80121,9 @@ def add_user( When adding a user to a MySQL cluster, additional options can be configured in the ``mysql_settings`` object. + When adding a user to a Kafka cluster, additional options can be configured in + the ``settings`` object. + The response will be a JSON object with a key called ``user``. The value of this will be an object that contains the standard attributes associated with a database user including its randomly generated password. @@ -79804,6 +80144,10 @@ def add_user( # JSON input template you can fill out and use as your body input. body = { + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client authentication. + (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user account. The valid values are @@ -79818,14 +80162,34 @@ def add_user( "readonly": bool, # Optional. For MongoDB clusters, set to ``true`` to create a read-only user."nThis option is not currently supported for other database engines. - "role": "str" # Optional. A string representing the database user's role. + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the ACL. Required. + "permission": "str", # Permission set applied to the + ACL. 'consume' allows for messages to be consumed from the topic. + 'produce' allows for messages to be published to the topic. + 'produceconsume' allows for both 'consume' and 'produce' permission. + 'admin' allows for 'produceconsume' as well as any operations to + administer the topic (delete, update). Required. Known values are: + "admin", "consume", "produce", and "produceconsume". + "topic": "str" # A regex for matching the topic(s) + that this ACL should apply to. Required. + } + ] + } } # response body for status code(s): 201 response == { "user": { + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -79838,9 +80202,27 @@ def add_user( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database user's + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the ACL. + Required. 
+ "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str" # A regex for matching the + topic(s) that this ACL should apply to. Required. + } + ] + } } } # response body for status code(s): 404 @@ -79875,6 +80257,9 @@ def add_user( When adding a user to a MySQL cluster, additional options can be configured in the ``mysql_settings`` object. + When adding a user to a Kafka cluster, additional options can be configured in + the ``settings`` object. + The response will be a JSON object with a key called ``user``. The value of this will be an object that contains the standard attributes associated with a database user including its randomly generated password. @@ -79896,6 +80281,10 @@ def add_user( # response body for status code(s): 201 response == { "user": { + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -79908,9 +80297,27 @@ def add_user( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database user's + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the ACL. + Required. + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str" # A regex for matching the + topic(s) that this ACL should apply to. Required. + } + ] + } } } # response body for status code(s): 404 @@ -79940,6 +80347,9 @@ def add_user( When adding a user to a MySQL cluster, additional options can be configured in the ``mysql_settings`` object. + When adding a user to a Kafka cluster, additional options can be configured in + the ``settings`` object. + The response will be a JSON object with a key called ``user``. The value of this will be an object that contains the standard attributes associated with a database user including its randomly generated password. @@ -79961,6 +80371,10 @@ def add_user( # response body for status code(s): 201 response == { "user": { + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). 
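A rough sketch of the Kafka user flow described above, assuming the synchronous client and placeholder identifiers: the ACL ``id`` is included only because the body template marks it required, and the permission and topic regex are illustrative.

.. code-block:: python

    import os

    from pydo import Client

    client = Client(token=os.environ["DIGITALOCEAN_TOKEN"])

    # POST /v2/databases/$DATABASE_ID/users with Kafka-specific ACL settings
    resp = client.databases.add_user(
        database_cluster_uuid="00000000-0000-0000-0000-000000000000",  # placeholder
        body={
            "name": "events-producer",
            "settings": {
                "acl": [
                    {
                        "id": "placeholder-acl-id",  # mirrors the body template above
                        "permission": "produce",     # admin | consume | produce | produceconsume
                        "topic": "customer-.*",      # regex selecting the topics this ACL covers
                    }
                ]
            },
        },
    )

    user = resp["user"]
    # Kafka users expose TLS client credentials (access_cert / access_key)
    print(user["name"], "access_cert" in user)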
"mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -79973,9 +80387,27 @@ def add_user( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database user's + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the ACL. + Required. + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str" # A regex for matching the + topic(s) that this ACL should apply to. Required. + } + ] + } } } # response body for status code(s): 404 @@ -80089,9 +80521,11 @@ def get_user( The response will be a JSON object with a ``user`` key. This will be set to an object containing the standard database user attributes. - For MySQL clusters, additional options will be contained in the mysql_settings + For MySQL clusters, additional options will be contained in the ``mysql_settings`` object. + For Kafka clusters, additional options will be contained in the ``settings`` object. + :param database_cluster_uuid: A unique identifier for a database cluster. Required. :type database_cluster_uuid: str :param username: The name of the database user. Required. @@ -80106,6 +80540,10 @@ def get_user( # response body for status code(s): 200 response == { "user": { + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -80118,9 +80556,27 @@ def get_user( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database user's + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the ACL. + Required. + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str" # A regex for matching the + topic(s) that this ACL should apply to. Required. + } + ] + } } } # response body for status code(s): 404 @@ -80365,6 +80821,10 @@ def reset_auth( # response body for status code(s): 200 response == { "user": { + "access_cert": "str", # Optional. 
Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -80377,9 +80837,27 @@ def reset_auth( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database user's + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the ACL. + Required. + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str" # A regex for matching the + topic(s) that this ACL should apply to. Required. + } + ] + } } } # response body for status code(s): 404 @@ -80436,6 +80914,10 @@ def reset_auth( # response body for status code(s): 200 response == { "user": { + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -80448,9 +80930,27 @@ def reset_auth( "name": "str", # The name of a database user. Required. "password": "str", # Optional. A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database user's + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the ACL. + Required. + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str" # A regex for matching the + topic(s) that this ACL should apply to. Required. + } + ] + } } } # response body for status code(s): 404 @@ -80505,6 +81005,10 @@ def reset_auth( # response body for status code(s): 200 response == { "user": { + "access_cert": "str", # Optional. Access certificate for TLS client + authentication. (Kafka only). + "access_key": "str", # Optional. Access key for TLS client + authentication. (Kafka only). "mysql_settings": { "auth_plugin": "str" # A string specifying the authentication method to be used for connections"nto the MySQL user @@ -80517,9 +81021,27 @@ def reset_auth( "name": "str", # The name of a database user. Required. "password": "str", # Optional. 
A randomly generated password for the database user. - "role": "str" # Optional. A string representing the database user's + "role": "str", # Optional. A string representing the database user's role. The value will be either"n"primary" or "normal". Known values are: "primary" and "normal". + "settings": { + "acl": [ + { + "id": "str", # An identifier for the ACL. + Required. + "permission": "str", # Permission set + applied to the ACL. 'consume' allows for messages to be consumed + from the topic. 'produce' allows for messages to be published to + the topic. 'produceconsume' allows for both 'consume' and + 'produce' permission. 'admin' allows for 'produceconsume' as well + as any operations to administer the topic (delete, update). + Required. Known values are: "admin", "consume", "produce", and + "produceconsume". + "topic": "str" # A regex for matching the + topic(s) that this ACL should apply to. Required. + } + ] + } } } # response body for status code(s): 404 @@ -83127,6 +83649,1871 @@ def update_major_version( return deserialized + @distributed_trace + def list_kafka_topics(self, database_cluster_uuid: str, **kwargs: Any) -> JSON: + """List Topics for a Kafka Cluster. + + To list all of a Kafka cluster's topics, send a GET request to + ``/v2/databases/$DATABASE_ID/topics``. + + The result will be a JSON object with a ``topics`` key. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "topics": [ + { + "name": "str", # Optional. The name of the Kafka topic. + "partition_count": 0, # Optional. The number of partitions + available for the topic. On update, this value can only be increased. + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. + Known values are: "active", "configuring", "deleting", and "unknown". + } + ] + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
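+                # A minimal usage sketch (an assumption for illustration: ``client`` is an
+                # already-configured ``pydo.Client`` and the cluster UUID is a placeholder;
+                # the call and response keys follow the operation defined above):
+                #
+                #   resp = client.databases.list_kafka_topics("<database_cluster_uuid>")
+                #   for topic in resp.get("topics", []):
+                #       print(topic["name"], topic.get("state"))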
+ } + """ + error_map = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 401: lambda response: ClientAuthenticationError(response=response), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[JSON] + + request = build_databases_list_kafka_topics_request( + database_cluster_uuid=database_cluster_uuid, + headers=_headers, + params=_params, + ) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + map_error( + status_code=response.status_code, response=response, error_map=error_map + ) + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) + + return cast(JSON, deserialized) + + @overload + def create_kafka_topic( + self, + database_cluster_uuid: str, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + """Create Topic for a Kafka Cluster. + + To create a topic attached to a Kafka cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/topics``. + + The result will be a JSON object with a ``topic`` key. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "config": { + "cleanup_policy": "delete", # Optional. Default value is "delete". + The cleanup_policy sets the retention policy to use on log segments. 'delete' + will discard old segments when retention time/size limits are reached. + 'compact' will enable log compaction, resulting in retention of the latest + value for each key. Known values are: "delete", "compact", and + "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". 
The compression_type specifies the compression type of the topic. + Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value is + 86400000. The delete_retention_ms specifies how long (in ms) to retain delete + tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is 60000. + The file_delete_delay_ms specifies the time (in ms) to wait before deleting a + file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_messages specifies the number of messages to + accumulate on a log partition before messages are flushed to disk. + "flush_ms": 9223372036854776000, # Optional. Default value is + 9223372036854776000. The flush_ms specifies the maximum time (in ms) that a + message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is 4096. The + index_interval_bytes specifies the number of bytes between entries being + added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The max_compaction_lag_ms specifies the maximum + amount of time (in ms) that a message will remain uncompacted. This is only + applicable if the logs are have compaction enabled. + "max_message_bytes": 1048588, # Optional. Default value is 1048588. + The max_messages_bytes specifies the largest record batch size (in bytes) + that can be sent to the server. This is calculated after compression if + compression is enabled. + "message_down_conversion_enable": True, # Optional. Default value is + True. The message_down_conversion_enable specifies whether down-conversion of + message formats is enabled to satisfy consumer requests. When 'false', the + broker will not perform conversion for consumers expecting older message + formats. The broker will respond with an ``UNSUPPORTED_VERSION`` error for + consume requests from these older clients. + "message_format_version": "3.0-IV1", # Optional. Default value is + "3.0-IV1". The message_format_version specifies the message format version + used by the broker to append messages to the logs. The value of this setting + is assumed to be 3.0-IV1 if the broker protocol version is 3.0 or higher. By + setting a particular message format version, all existing messages on disk + must be smaller or equal to the specified version. Known values are: "0.8.0", + "0.8.1", "0.8.2", "0.9.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", + "0.10.1-IV1", "0.10.1-IV2", "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", + "0.11.0-IV2", "1.0-IV0", "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", + "2.1-IV1", "2.1-IV2", "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", + "2.4-IV1", "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", "3.3-IV1", + "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default value + is "create_time". The message_timestamp_type specifies whether to use the + message create time or log append time as the timestamp on a message. Known + values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value is 0.5. + The min_cleanable_dirty_ratio specifies the frequency of log compaction (if + enabled) in relation to duplicates present in the logs. 
For example, at 0.5, + at most 50% of the log could be duplicates before compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. The + min_compaction_lag_ms specifies the minimum time (in ms) that a message will + remain uncompacted in the log. Only relevant if log compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. The + min_insync_replicas specifies the number of replicas that must ACK a write + for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. The + preallocate specifies whether a file should be preallocated on disk when + creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is 604800000. + The retention_ms specifies the maximum amount of time (in ms) to keep a + message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is 209715200. + The segment_bytes specifies the maximum size of a single log file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is 604800000. The + segment_ms specifies the period of time after which the log will be forced to + roll if the segment file isn't full. This ensures that retention can delete + or compact old data. + "unclean_leader_election_enable": False # Optional. Default value is + False. Whether unclean_leader_election_enable specifies whether to allow + replicas that are not insync to be elected as leaders as a last resort. This + may result in data loss since those leaders are not insync. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partition_count": 0, # Optional. The number of partitions available for the + topic. On update, this value can only be increased. + "replication_factor": 0 # Optional. The number of nodes to replicate data + across the cluster. + } + + # response body for status code(s): 201 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. 
+ "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. 
The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + "unclean_leader_election_enable": False # Optional. Default + value is False. Whether unclean_leader_election_enable specifies whether + to allow replicas that are not insync to be elected as leaders as a last + resort. This may result in data loss since those leaders are not insync. + }, + "name": "str", # The name of the Kafka topic. Required. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def create_kafka_topic( + self, + database_cluster_uuid: str, + body: Optional[IO] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + """Create Topic for a Kafka Cluster. + + To create a topic attached to a Kafka cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/topics``. + + The result will be a JSON object with a ``topic`` key. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Default value is None. + :type body: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. 
code-block:: python + + # response body for status code(s): 201 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. 
Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + "unclean_leader_election_enable": False # Optional. Default + value is False. Whether unclean_leader_election_enable specifies whether + to allow replicas that are not insync to be elected as leaders as a last + resort. This may result in data loss since those leaders are not insync. + }, + "name": "str", # The name of the Kafka topic. Required. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. 
+ } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def create_kafka_topic( + self, + database_cluster_uuid: str, + body: Optional[Union[JSON, IO]] = None, + **kwargs: Any + ) -> JSON: + """Create Topic for a Kafka Cluster. + + To create a topic attached to a Kafka cluster, send a POST request to + ``/v2/databases/$DATABASE_ID/topics``. + + The result will be a JSON object with a ``topic`` key. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param body: Is either a model type or a IO type. Default value is None. + :type body: JSON or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 201 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. 
The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. 
The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + "unclean_leader_election_enable": False # Optional. Default + value is False. Whether unclean_leader_election_enable specifies whether + to allow replicas that are not insync to be elected as leaders as a last + resort. This may result in data loss since those leaders are not insync. + }, + "name": "str", # The name of the Kafka topic. Required. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
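+                # A minimal usage sketch (an assumption for illustration: ``client`` is an
+                # already-configured ``pydo.Client``; the UUID, topic name, and counts are
+                # placeholders, and only fields from the request body template above are used):
+                #
+                #   created = client.databases.create_kafka_topic(
+                #       "<database_cluster_uuid>",
+                #       body={
+                #           "name": "events",
+                #           "partition_count": 3,
+                #           "replication_factor": 2,
+                #       },
+                #   )
+                #   print(created["topic"]["name"], created["topic"].get("state"))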
+ } + """ + error_map = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 401: lambda response: ClientAuthenticationError(response=response), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) # type: Optional[str] + cls = kwargs.pop("cls", None) # type: ClsType[JSON] + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IO, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + request = build_databases_create_kafka_topic_request( + database_cluster_uuid=database_cluster_uuid, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201, 404]: + map_error( + status_code=response.status_code, response=response, error_map=error_map + ) + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 201: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) + + return cast(JSON, deserialized) + + @distributed_trace + def get_kafka_topic( + self, database_cluster_uuid: str, topic_name: str, **kwargs: Any + ) -> JSON: + """Get Topic for a Kafka Cluster. + + To retrieve a given topic by name from the set of a Kafka cluster's topics, + send a GET request to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + The result will be a JSON object with a ``topic`` key. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 
'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". 
+ "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + "unclean_leader_election_enable": False # Optional. Default + value is False. Whether unclean_leader_election_enable specifies whether + to allow replicas that are not insync to be elected as leaders as a last + resort. This may result in data loss since those leaders are not insync. + }, + "name": "str", # The name of the Kafka topic. Required. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. 
Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 401: lambda response: ClientAuthenticationError(response=response), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[JSON] + + request = build_databases_get_kafka_topic_request( + database_cluster_uuid=database_cluster_uuid, + topic_name=topic_name, + headers=_headers, + params=_params, + ) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + map_error( + status_code=response.status_code, response=response, error_map=error_map + ) + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) + + return cast(JSON, deserialized) + + @overload + def update_kafka_topic( + self, + database_cluster_uuid: str, + topic_name: str, + body: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + """Update Topic for a Kafka Cluster. + + To update a topic attached to a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + The result will be a JSON object with a ``topic`` key. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str + :param body: Default value is None. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # JSON input template you can fill out and use as your body input. + body = { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 
'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. 
Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + "unclean_leader_election_enable": False # Optional. Default + value is False. Whether unclean_leader_election_enable specifies whether + to allow replicas that are not insync to be elected as leaders as a last + resort. This may result in data loss since those leaders are not insync. + }, + "name": "str", # Optional. The name of the Kafka topic. + "partition_count": 0, # Optional. The number of partitions available + for the topic. On update, this value can only be increased. + "replication_factor": 0 # Optional. The number of nodes to replicate + data across the cluster. + } + } + + # response body for status code(s): 200 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "Iz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. 
The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into te offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs are have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_messages_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. 
+ The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + "unclean_leader_election_enable": False # Optional. Default + value is False. Whether unclean_leader_election_enable specifies whether + to allow replicas that are not insync to be elected as leaders as a last + resort. This may result in data loss since those leaders are not insync. + }, + "name": "str", # The name of the Kafka topic. Required. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @overload + def update_kafka_topic( + self, + database_cluster_uuid: str, + topic_name: str, + body: Optional[IO] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> JSON: + """Update Topic for a Kafka Cluster. + + To update a topic attached to a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + The result will be a JSON object with a ``topic`` key. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. 
+ :type database_cluster_uuid: str + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str + :param body: Default value is None. + :type body: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "lz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into the offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional. + Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_message_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version.
Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional. Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + "unclean_leader_election_enable": False # Optional. Default + value is False. Whether unclean_leader_election_enable specifies whether + to allow replicas that are not insync to be elected as leaders as a last + resort. This may result in data loss since those leaders are not insync. + }, + "name": "str", # The name of the Kafka topic. Required. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. 
+ } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + + @distributed_trace + def update_kafka_topic( + self, + database_cluster_uuid: str, + topic_name: str, + body: Optional[Union[JSON, IO]] = None, + **kwargs: Any + ) -> JSON: + """Update Topic for a Kafka Cluster. + + To update a topic attached to a Kafka cluster, send a PUT request to + ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + The result will be a JSON object with a ``topic`` key. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str + :param body: Is either a model type or a IO type. Default value is None. + :type body: JSON or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :return: JSON object + :rtype: JSON + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 200 + response == { + "topic": { + "config": { + "cleanup_policy": "delete", # Optional. Default value is + "delete". The cleanup_policy sets the retention policy to use on log + segments. 'delete' will discard old segments when retention time/size + limits are reached. 'compact' will enable log compaction, resulting in + retention of the latest value for each key. Known values are: "delete", + "compact", and "compact_delete". + "compression_type": "producer", # Optional. Default value is + "producer". The compression_type specifies the compression type of the + topic. Known values are: "producer", "gzip", "snappy", "lz4", "zstd", and + "uncompressed". + "delete_retention_ms": 86400000, # Optional. Default value + is 86400000. The delete_retention_ms specifies how long (in ms) to retain + delete tombstone markers for topics. + "file_delete_delay_ms": 60000, # Optional. Default value is + 60000. The file_delete_delay_ms specifies the time (in ms) to wait before + deleting a file from the filesystem. + "flush_messages": 9223372036854776000, # Optional. Default + value is 9223372036854776000. The flush_messages specifies the number of + messages to accumulate on a log partition before messages are flushed to + disk. + "flush_ms": 9223372036854776000, # Optional. Default value + is 9223372036854776000. The flush_ms specifies the maximum time (in ms) + that a message is kept in memory before being flushed to disk. + "index_interval_bytes": 4096, # Optional. Default value is + 4096. The index_interval_bytes specifies the number of bytes between + entries being added into the offset index. + "max_compaction_lag_ms": 9223372036854776000, # Optional.
+ Default value is 9223372036854776000. The max_compaction_lag_ms specifies + the maximum amount of time (in ms) that a message will remain + uncompacted. This is only applicable if the logs have compaction + enabled. + "max_message_bytes": 1048588, # Optional. Default value is + 1048588. The max_message_bytes specifies the largest record batch size + (in bytes) that can be sent to the server. This is calculated after + compression if compression is enabled. + "message_down_conversion_enable": True, # Optional. Default + value is True. The message_down_conversion_enable specifies whether + down-conversion of message formats is enabled to satisfy consumer + requests. When 'false', the broker will not perform conversion for + consumers expecting older message formats. The broker will respond with + an ``UNSUPPORTED_VERSION`` error for consume requests from these older + clients. + "message_format_version": "3.0-IV1", # Optional. Default + value is "3.0-IV1". The message_format_version specifies the message + format version used by the broker to append messages to the logs. The + value of this setting is assumed to be 3.0-IV1 if the broker protocol + version is 3.0 or higher. By setting a particular message format + version, all existing messages on disk must be smaller or equal to the + specified version. Known values are: "0.8.0", "0.8.1", "0.8.2", "0.9.0", + "0.10.0-IV0", "0.10.0-IV1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", + "0.10.2-IV0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0-IV0", + "1.1-IV0", "2.0-IV0", "2.0-IV1", "2.1-IV0", "2.1-IV1", "2.1-IV2", + "2.2-IV0", "2.2-IV1", "2.3-IV0", "2.3-IV1", "2.4-IV0", "2.4-IV1", + "2.5-IV0", "2.6-IV0", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8-IV0", + "2.8-IV1", "3.0-IV0", "3.0-IV1", "3.1-IV0", "3.2-IV0", "3.3-IV0", + "3.3-IV1", "3.3-IV2", and "3.3-IV3". + "message_timestamp_type": "create_time", # Optional. Default + value is "create_time". The message_timestamp_type specifies whether to + use the message create time or log append time as the timestamp on a + message. Known values are: "create_time" and "log_append_time". + "min_cleanable_dirty_ratio": 0.5, # Optional. Default value + is 0.5. The min_cleanable_dirty_ratio specifies the frequency of log + compaction (if enabled) in relation to duplicates present in the logs. + For example, at 0.5, at most 50% of the log could be duplicates before + compaction would begin. + "min_compaction_lag_ms": 0, # Optional. Default value is 0. + The min_compaction_lag_ms specifies the minimum time (in ms) that a + message will remain uncompacted in the log. Only relevant if log + compaction is enabled. + "min_insync_replicas": 1, # Optional. Default value is 1. + The min_insync_replicas specifies the number of replicas that must ACK a + write for the write to be considered successful. + "preallocate": False, # Optional. Default value is False. + The preallocate specifies whether a file should be preallocated on disk + when creating a new log segment. + "retention_bytes": -1, # Optional. Default value is -1. The + retention_bytes specifies the maximum size of the log (in bytes) before + deleting messages. -1 indicates that there is no limit. + "retention_ms": 604800000, # Optional. Default value is + 604800000. The retention_ms specifies the maximum amount of time (in ms) + to keep a message before deleting it. + "segment_bytes": 209715200, # Optional. Default value is + 209715200. The segment_bytes specifies the maximum size of a single log + file (in bytes). + "segment_jitter_ms": 0, # Optional.
Default value is 0. The + segment_jitter_ms specifies the maximum random jitter subtracted from the + scheduled segment roll time to avoid thundering herds of segment rolling. + "segment_ms": 604800000, # Optional. Default value is + 604800000. The segment_ms specifies the period of time after which the + log will be forced to roll if the segment file isn't full. This ensures + that retention can delete or compact old data. + "unclean_leader_election_enable": False # Optional. Default + value is False. Whether unclean_leader_election_enable specifies whether + to allow replicas that are not insync to be elected as leaders as a last + resort. This may result in data loss since those leaders are not insync. + }, + "name": "str", # The name of the Kafka topic. Required. + "partitions": [ + { + "consumer_groups": [ + { + "group_name": "str", # Optional. + Name of the consumer group. + "offset": 0 # Optional. The current + offset of the consumer group. + } + ], + "earliest_offset": 0, # Optional. The earliest + consumer offset amongst consumer groups. + "id": 0, # Optional. An identifier for the + partition. + "in_sync_replicas": 0, # Optional. The number of + nodes that are in-sync (have the latest data) for the given + partition. + "size": 0 # Optional. Size of the topic partition in + bytes. + } + ], + "replication_factor": 0, # Optional. The number of nodes to + replicate data across the cluster. + "state": "str" # Optional. The state of the Kafka topic. Known + values are: "active", "configuring", "deleting", and "unknown". + } + } + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. 
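+            # Illustrative usage (not part of the generated response schema): a
+            # minimal sketch assuming an authenticated ``pydo`` ``Client`` whose
+            # ``databases`` operation group exposes this method; the cluster UUID,
+            # topic name, and config value below are hypothetical placeholders.
+            #
+            #     updated = client.databases.update_kafka_topic(
+            #         database_cluster_uuid="<database-cluster-uuid>",
+            #         topic_name="events",
+            #         body={"config": {"retention_ms": 86400000}},
+            #     )
+            #     updated["topic"]["state"]  # e.g. "active" or "configuring"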
+ } + """ + error_map = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 401: lambda response: ClientAuthenticationError(response=response), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type = kwargs.pop( + "content_type", _headers.pop("Content-Type", None) + ) # type: Optional[str] + cls = kwargs.pop("cls", None) # type: ClsType[JSON] + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(body, (IO, bytes)): + _content = body + else: + if body is not None: + _json = body + else: + _json = None + + request = build_databases_update_kafka_topic_request( + database_cluster_uuid=database_cluster_uuid, + topic_name=topic_name, + content_type=content_type, + json=_json, + content=_content, + headers=_headers, + params=_params, + ) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 404]: + map_error( + status_code=response.status_code, response=response, error_map=error_map + ) + raise HttpResponseError(response=response) + + response_headers = {} + if response.status_code == 200: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, cast(JSON, deserialized), response_headers) + + return cast(JSON, deserialized) + + @distributed_trace + def delete_kafka_topic( + self, database_cluster_uuid: str, topic_name: str, **kwargs: Any + ) -> Optional[JSON]: + """Delete Topic for a Kafka Cluster. + + To delete a single topic within a Kafka cluster, send a DELETE request + to ``/v2/databases/$DATABASE_ID/topics/$TOPIC_NAME``. + + A status of 204 will be given. This indicates that the request was + processed successfully, but that no response body is needed. + + :param database_cluster_uuid: A unique identifier for a database cluster. Required. + :type database_cluster_uuid: str + :param topic_name: The name used to identify the Kafka topic. Required. + :type topic_name: str + :return: JSON object or None + :rtype: JSON or None + :raises ~azure.core.exceptions.HttpResponseError: + + Example: + .. code-block:: python + + # response body for status code(s): 404 + response == { + "id": "str", # A short identifier corresponding to the HTTP status code + returned. For example, the ID for a response returning a 404 status code would + be "not_found.". 
Required. + "message": "str", # A message providing additional information about the + error, including details to help resolve it when possible. Required. + "request_id": "str" # Optional. Optionally, some endpoints may include a + request ID that should be provided when reporting bugs or opening support + tickets to help identify the issue. + } + """ + error_map = { + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 401: lambda response: ClientAuthenticationError(response=response), + 429: HttpResponseError, + 500: HttpResponseError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]] + + request = build_databases_delete_kafka_topic_request( + database_cluster_uuid=database_cluster_uuid, + topic_name=topic_name, + headers=_headers, + params=_params, + ) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204, 404]: + map_error( + status_code=response.status_code, response=response, error_map=error_map + ) + raise HttpResponseError(response=response) + + deserialized = None + response_headers = {} + if response.status_code == 204: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.status_code == 404: + response_headers["ratelimit-limit"] = self._deserialize( + "int", response.headers.get("ratelimit-limit") + ) + response_headers["ratelimit-remaining"] = self._deserialize( + "int", response.headers.get("ratelimit-remaining") + ) + response_headers["ratelimit-reset"] = self._deserialize( + "int", response.headers.get("ratelimit-reset") + ) + + if response.content: + deserialized = response.json() + else: + deserialized = None + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + class DomainsOperations: """