diff --git a/README.md b/README.md
index abd0cd22..4ac359a5 100644
--- a/README.md
+++ b/README.md
@@ -74,9 +74,22 @@ renameTable
::= 'RENAME' 'TABLE' oldName 'TO' newName
```
-Next, run the `scripts/railroad.py` script to generate the SVG image.
+Be sure to maintain proper indentation!
-During its final output, a markdown image with the appropriate syntax is printed.
+Next, run the `scripts/railroad.py [name]` script to generate the SVG image. The `[name]` argument is optional; omit it to generate all diagrams.
+
+For example:
+
+```shell
+python3 scripts/railroad.py renameTable
+```
+
+At the end of its output, the script prints a markdown image reference with the appropriate syntax:
+
+```text
+Copy the image syntax below and paste it into your markdown file:
+![Diagram for renameTable](/images/docs/diagrams/renameTable.svg)
+```
Copy this syntax and paste it into the markdown file where you want the diagram to appear.
diff --git a/documentation/configuration-utils/_iam.config.json b/documentation/configuration-utils/_iam.config.json
new file mode 100644
index 00000000..54bdbd5d
--- /dev/null
+++ b/documentation/configuration-utils/_iam.config.json
@@ -0,0 +1,86 @@
+{
+ "acl.enabled": {
+ "default": "true",
+ "description": "Enables/disables Identity and Access Management."
+ },
+ "acl.admin.user.enabled": {
+ "default": "true",
+ "description": "Enables/disables the built-in admin user."
+ },
+ "acl.admin.user": {
+ "default": "admin",
+ "description": "Name of the built-in admin user."
+ },
+ "acl.admin.password": {
+ "default": "quest",
+ "description": "The password of the built-in admin user."
+ },
+ "acl.basic.auth.realm.enabled": {
+ "default": "false",
+ "description": "When enabled the browser's basic auth popup window is used instead of the Web Console's login screen. Only present for backwards compatibility."
+ },
+ "acl.entity.name.max.length": {
+ "default": "255",
+ "description": "Maximum length of user, group and service account names."
+ },
+ "acl.password.hash.iteration.count": {
+ "default": "100000",
+ "description": "QuestDB Enterprise never stores passwords in plain text, it stores password hashes only. This is the number of hash iterations used in password hashing. Higher means safer, almost never should be changed."
+ },
+ "acl.rest.token.refresh.threshold": {
+ "default": "10",
+ "description": "When a REST token is created in REFRESH mode, its TTL is extended on every successful authentication, unless the last successful authentication was within this threshold. This setting removes unnecessary overhead of continuously refreshing REST tokens if they are used often. The value is expressed in seconds."
+ },
+ "tls.enabled": {
+ "default": "false",
+ "description": "Enables/disables TLS encryption globally for all QuestDB interfaces (HTTP endpoints, ILP over TCP)."
+ },
+ "tls.cert.path": {
+ "default": "",
+ "description": "Path to certificate used for TLS encryption globally. The certificate should be DER-encoded and saved in PEM format."
+ },
+ "tls.private.key.path": {
+ "default": "",
+ "description": "Path to private key used for TLS encryption globally."
+ },
+ "http.tls.enabled": {
+ "default": "false",
+ "description": "Enables/disables TLS encryption for the HTTP server only."
+ },
+ "http.tls.cert.path": {
+ "default": "",
+ "description": "Path to certificate used for TLS encryption for the HTTP server only. The certificate should be DER-encoded and saved in PEM format."
+ },
+ "http.tls.private.key.path": {
+ "default": "",
+ "description": "Path to private key used for TLS encryption for the HTTP server only."
+ },
+ "http.min.tls.enabled": {
+ "default": "false",
+ "description": "Enables/disables TLS encryption for the minimal HTTP server only."
+ },
+ "http.min.tls.cert.path": {
+ "default": "",
+ "description": "Path to certificate used for TLS encryption for the minimal HTTP server only. The certificate should be DER-encoded and saved in PEM format."
+ },
+ "http.min.tls.private.key.path": {
+ "default": "",
+ "description": "Path to private key used for TLS encryption for the minimal HTTP server only."
+ },
+ "line.tcp.tls.enabled": {
+ "default": "false",
+ "description": "Enables/disables TLS encryption for ILP over TCP only."
+ },
+ "line.tcp.tls.cert.path": {
+ "default": "",
+ "description": "Path to certificate used for TLS encryption for ILP over TCP only. The certificate should be DER-encoded and saved in PEM format."
+ },
+ "line.tcp.tls.private.key.path": {
+ "default": "",
+ "description": "Path to private key used for TLS encryption for ILP over TCP only."
+ },
+ "line.tcp.acl.enabled": {
+ "default": "true",
+ "description": "Enables/disables authentication for the ILP over TCP endpoint only."
+ }
+}
diff --git a/documentation/configuration-utils/_oidc.config.json b/documentation/configuration-utils/_oidc.config.json
index d6bfae7c..c57d0985 100644
--- a/documentation/configuration-utils/_oidc.config.json
+++ b/documentation/configuration-utils/_oidc.config.json
@@ -1,26 +1,34 @@
{
"acl.oidc.enabled": {
- "default": false,
+ "default": "false",
"description": "Enables/disables OIDC authentication. When enabled, few other configuration options must also be set."
},
+ "acl.oidc.pkce.enabled": {
+ "default": "true",
+ "description": "Enables/disables PKCE for the Authorization Code Flow. This should always be enabled in a production environment, the Web Console is not fully secure without it."
+ },
"acl.oidc.ropc.flow.enabled": {
- "default": false,
- "description": "Enables/disables Resource Owner Password Credentials flow."
+ "default": "false",
+ "description": "Enables/disables Resource Owner Password Credentials flow. When enabled, this flow also has to be configured in the OIDC Provider."
+ },
+ "acl.oidc.configuration.url": {
+ "default": "",
+ "description": "URL where the OpenID Provider's configuration information cna be loaded in json format, should always end with `/.well-known/openid-configuration`."
},
"acl.oidc.host": {
"default": "",
- "description": "OIDC provider hostname, required when OIDC is enabled."
+ "description": "OIDC provider hostname. Required when OIDC is enabled, unless the OIDC configuration URL is set."
},
"acl.oidc.port": {
"default": 443,
- "description": "OIDC provider port number, required when OIDC is enabled."
+ "description": "OIDC provider port number."
},
"acl.oidc.tls.enabled": {
- "default": true,
- "description": "Whether the OIDC provider requires a secure connection or not. It is highly unlikely, but if the User Info Endpoint does not require a secure connection, this option can be set to `false`."
+ "default": "true",
+ "description": "Whether the OIDC provider requires a secure connection or not. It is highly unlikely in a production environment, but if the OpenID Provider endpoints do not require a secure connection, this option can be set to `false`."
},
"acl.oidc.tls.validation.enabled": {
- "default": true,
+ "default": "true",
"description": "Enables/disables TLS certificate validation. If you are working with self-signed certificates that you would like QuestDB to trust, disable this option. Validation is strongly recommended in production environments. QuestDB will check that the certificate is valid, and that it is issued for the server to which it connects."
},
"acl.oidc.tls.keystore.path": {
@@ -39,6 +47,10 @@
"default": "",
"description": "Client name assigned to QuestDB in the OIDC server, required when OIDC is enabled."
},
+ "acl.oidc.audience": {
+ "default": "",
+ "description": "OAuth2 audience as set on the tokens issued by the OIDC Provider, defaults to the client id."
+ },
"acl.oidc.redirect.uri": {
"default": "",
"description": "The redirect URI tells the OIDC server where to redirect the user after successful authentication. If not set, the Web Console defaults it to the location where it was loaded from (`window.location.href`)."
@@ -47,6 +59,10 @@
"default": "openid",
"description": "The OIDC server should ask consent for the list of scopes provided in this property. The scope `openid` is mandatory, and always should be included."
},
+ "acl.oidc.public.keys.endpoint": {
+ "default": "/pf/JWKS",
+ "description": "JSON Web Key Set (JWKS) Endpoint, the default value should work for the Ping Identity Platform. This endpoint provides the list of public keys can be used to decode and validate ID tokens issued by the OIDC Provider."
+ },
"acl.oidc.authorization.endpoint": {
"default": "/as/authorization.oauth2",
"description": "OIDC Authorization Endpoint, the default value should work for the Ping Identity Platform."
@@ -57,11 +73,19 @@
},
"acl.oidc.userinfo.endpoint": {
"default": "/idp/userinfo.openid",
- "description": "OIDC User Info Endpoint, the default value should work for the Ping Identity Platform."
+ "description": "OIDC User Info Endpoint, the default value should work for the Ping Identity Platform. Used to retrieve additional user information which contains the user's group memberships."
+ },
+ "acl.oidc.groups.encoded.in.token": {
+ "default": "false",
+ "description": "Should be set to false, if the OIDC Provider is configured to encode the group memberships of the user into the id token. When set to true, QuestDB will look for the groups in the token instead of calling the User Info endpoint."
+ },
+ "acl.oidc.sub.claim": {
+ "default": "sub",
+ "description": "The name of the claim in the user information, which contains the name of the user. Could be a username, the user's full name or email. It will be displayed in the Web Console, and logged for audit purposes."
},
"acl.oidc.groups.claim": {
"default": "groups",
- "description": "The name of the custom claim inside the user info response object, which contains the group memberships of the user."
+ "description": "The name of the custom claim in the user information, which contains the group memberships of the user."
},
"acl.oidc.cache.ttl": {
"default": 30000,
diff --git a/documentation/configuration.md b/documentation/configuration.md
index 20de4120..5a485e43 100644
--- a/documentation/configuration.md
+++ b/documentation/configuration.md
@@ -14,6 +14,7 @@ import postgresConfig from "./configuration-utils/\_postgres.config.json"
import tcpConfig from "./configuration-utils/\_tcp.config.json"
import udpConfig from "./configuration-utils/\_udp.config.json"
import replicationConfig from "./configuration-utils/\_replication.config.json"
+import iamConfig from "./configuration-utils/\_iam.config.json"
import oidcConfig from "./configuration-utils/\_oidc.config.json"
import logConfig from "./configuration-utils/\_log.config.json"
@@ -192,8 +193,8 @@ It is important that the two path are identical
### Parallel SQL execution
-This section describes settings that can affect parallelism level of SQL
-execution and therefore performance.
+This section describes settings that can affect the level of parallelism during
+SQL execution, and therefore can also have an impact on performance.
@@ -207,7 +208,7 @@ PostgresSQL wire protocol.
### InfluxDB Line Protocol (ILP)
This section describes ingestion settings for incoming messages using InfluxDB
-line protocol.
+Line Protocol.
| Property | Default | Description |
| ------------------------- | ------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -215,6 +216,8 @@ line protocol.
#### HTTP specific settings
+ILP over HTTP is the preferred way of ingesting data.
+
| Property | Default | Description |
| ---------------------- | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------- |
| line.http.enabled | true | Enable ILP over HTTP. Default port is 9000. Enabled by default within open source versions, defaults to false and must be enabled for Enterprise. |
@@ -229,9 +232,8 @@ line protocol.
:::note
-The UDP receiver is deprecated since QuestDB version 6.5.2. We recommend the
-[InfluxDB Line Protocol TCP receiver](/docs/reference/api/ilp/overview/)
-instead.
+The UDP receiver has been deprecated since QuestDB version 6.5.2. We recommend ILP over
+HTTP instead, or, less commonly, [ILP over TCP](/docs/reference/api/ilp/overview/).
:::
@@ -258,6 +260,23 @@ For a tuning guide see... the
+### Identity and Access Management (IAM)
+
+:::note
+
+Identity and Access Management is available within [QuestDB Enterprise](/enterprise/).
+
+:::
+
+Identity and Access Management (IAM) ensures that data can be accessed only
+by authorized users. The configuration properties below relate to various
+authentication and authorization features.
+
+For a full explanation of IAM, see the
+[Identity and Access Management (IAM) documentation](/docs/operations/rbac).
+
+
+
### OpenID Connect (OIDC)
:::note
@@ -266,10 +285,10 @@ OpenID Connect is [Enterprise](/enterprise/) and [Cloud](/cloud/) only.
:::
-Integrate with OpenID Connect (OIDC) to sync QuestDB with an Identity Provider
-(IdP).
+OpenID Connect (OIDC) support is part of QuestDB's Identity and Access Management.
+The database can be integrated with any OAuth2/OIDC Identity Provider (IdP).
-For a full explanation of OIDC, see the
+For detailed information about OIDC, see the
[OpenID Connect (OIDC) integration guide](/docs/operations/openid-connect-oidc-integration).
diff --git a/documentation/guides/create-database.md b/documentation/guides/create-database.md
index aeb7fceb..4bdcaf66 100644
--- a/documentation/guides/create-database.md
+++ b/documentation/guides/create-database.md
@@ -12,7 +12,7 @@ specific types.
For most applications, you will import your data using methods like the InfluxDB
Line Protocol, CSV imports, or integration with third-party tools such as
-Telegraf, [Kafka](/docs/third-party-tools/kafka/overview/), or Prometheus. If your interest lies in data ingestion rather
+Telegraf, [Kafka](/docs/third-party-tools/kafka), or Prometheus. If your interest lies in data ingestion rather
than generation, refer to our [ingestion overview](/docs/ingestion-overview/).
Alternatively, the [QuestDB demo instance](https://demo.questdb.io) offers a
practical way to explore data creation and manipulation without setting up your
diff --git a/documentation/guides/enterprise-quick-start.md b/documentation/guides/enterprise-quick-start.md
index 1a8d8282..3772684a 100644
--- a/documentation/guides/enterprise-quick-start.md
+++ b/documentation/guides/enterprise-quick-start.md
@@ -238,7 +238,7 @@ Now, this private key is then added to the client.
This provides authenticated access to QuestDB for the "ingest" user.
-For example, if you are leveraging Java:
+For example, if you are leveraging Java and our recommended InfluxDB Line Protocol over HTTP client:
```java
Java client example:
@@ -248,8 +248,8 @@ import java.time.temporal.ChronoUnit;
public class ILPMain {
public static void main(String[] args) {
- try (Sender sender = Sender.builder(Sender.Transport.TCP)
- .address("localhost:9009")
+ try (Sender sender = Sender.builder(Sender.Transport.HTTP)
+ .address("localhost:9000")
.enableTls()
.enableAuth("ingest")
.authToken("kom7j38LG44HcPfO92oZ4558e6KoeTHn6H5rA8vK3PQ")
@@ -272,7 +272,7 @@ Once generated, safely store it.
Connecting a client to ILP is a common path.
-However, you may use something like [Kafka](/docs/third-party-tools/kafka/overview/).
+However, you may use something like [Kafka](/docs/third-party-tools/kafka).
## 5. Ingest data, Kafka Connect (optional)
diff --git a/documentation/ingestion-overview.md b/documentation/ingestion-overview.md
index 7ffd6e95..a91fc32a 100644
--- a/documentation/ingestion-overview.md
+++ b/documentation/ingestion-overview.md
@@ -64,7 +64,7 @@ and/or queues.
Checkout our quick start guides for the following:
- [Flink](/docs/third-party-tools/flink)
-- [Kafka](/docs/third-party-tools/kafka/overview)
+- [Kafka](/docs/third-party-tools/kafka)
- [Redpanda](/docs/third-party-tools/redpanda)
- [Telegraf](/docs/third-party-tools/telegraf)
diff --git a/documentation/operations/rbac.md b/documentation/operations/rbac.md
index 682c5fbb..0c99244b 100644
--- a/documentation/operations/rbac.md
+++ b/documentation/operations/rbac.md
@@ -524,7 +524,7 @@ GRANT BACKUP DATABASE TO user;
of the database.
```questdb-sql
--database level
-GRANT ATTACH PARTITION TO user;
+GRANT ATTACH PARTITION ON ALL TABLES TO user;
--table level
GRANT ATTACH PARTITION ON table1, table2 TO user;
@@ -901,8 +901,9 @@ When a column is deleted and then re-created, permissions are re-instated.
### Owner grants
-When a user creates a new table or adds a new column to an existing table, it
-gets owner permissions on the newly created database object.
+When a user creates a new table or adds a new column to an existing table,
+it receives owner permissions on the newly created database object.
+The same applies when creating a new service account.
If the user creates a table, the user automatically gets all table level
permissions with the `GRANT` option on it.
@@ -910,6 +911,9 @@ permissions with the `GRANT` option on it.
If the user adds a new column to an existing table, the user automatically gets
all column level permissions with the `GRANT` option on it.
+If the user creates a new service account, the user automatically gets the
+`ASSUME SERVICE ACCOUNT` permission with the `GRANT` option on it.
+
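+For example, a minimal sketch (the service account and user names are hypothetical):
+
+```questdb-sql
+CREATE SERVICE ACCOUNT ingest;
+-- the creating user received ASSUME SERVICE ACCOUNT with the GRANT option,
+-- so it can pass the permission on to another principal
+GRANT ASSUME SERVICE ACCOUNT ingest TO another_user;
+```
+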
In QuestDB ownership does not persist. This means that the user gets full
control over the newly created table or column at the time of creating it, but
if the permissions are later revoked, then the user cannot get it back without
diff --git a/documentation/reference/function/hash.md b/documentation/reference/function/hash.md
new file mode 100644
index 00000000..744b55a0
--- /dev/null
+++ b/documentation/reference/function/hash.md
@@ -0,0 +1,147 @@
+---
+title: Hash Functions
+sidebar_label: Hash
+description: Hash (md5, sha1 and sha256) SQL functions reference documentation and explanation.
+---
+
+Hash functions generate fixed-size string outputs from variable-length inputs.
+
+These functions are useful for data integrity verification, checksums, and data anonymization.
+
+## Supported functions
+
+- [`md5()`](#md5) – Generates a 128-bit (32 character) hash value
+- [`sha1()`](#sha1) – Generates a 160-bit (40 character) hash value
+- [`sha256()`](#sha256) – Generates a 256-bit (64 character) hash value
+
+## Function reference
+
+### md5()
+
+Calculates an MD5 hash of the input value and returns it as a hexadecimal string.
+
+**Arguments:**
+- String, varchar, or binary value
+
+**Return value:**
+- A 32-character hexadecimal string representing the MD5 hash
+- NULL if the input is NULL
+
+**Examples:**
+```questdb-sql title="md5() with string input" demo
+SELECT md5('abc');
+-- Returns: '900150983cd24fb0d6963f7d28e17f72'
+
+SELECT md5('');
+-- Returns: 'd41d8cd98f00b204e9800998ecf8427e'
+```
+
+```questdb-sql title="md5() with UTF-8 input" demo
+SELECT md5('Hello, world!');
+-- Returns: '6cd3556deb0da54bca060b4c39479839'
+```
+
+### sha1()
+
+Calculates a SHA1 hash of the input value and returns it as a hexadecimal string.
+
+**Arguments:**
+- String, varchar, or binary value
+
+**Return value:**
+- A 40-character hexadecimal string representing the SHA1 hash
+- NULL if the input is NULL
+
+**Examples:**
+```questdb-sql title="sha1() with string input" demo
+SELECT sha1('abc');
+-- Returns: 'a9993e364706816aba3e25717850c26c9cd0d89d'
+
+SELECT sha1('');
+-- Returns: 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
+```
+
+```questdb-sql title="sha1() with UTF-8 input" demo
+SELECT sha1('Hello, world!');
+-- Returns: '943a702d06f34599aee1f8da8ef9f7296031d699'
+```
+
+### sha256()
+
+Calculates a SHA256 hash of the input value and returns it as a hexadecimal string.
+
+**Arguments:**
+- String, varchar, or binary value
+
+**Return value:**
+- A 64-character hexadecimal string representing the SHA256 hash
+- NULL if the input is NULL
+
+**Examples:**
+```questdb-sql title="sha256() with string input" demo
+SELECT sha256('abc');
+-- Returns: 'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'
+
+SELECT sha256('');
+-- Returns: 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
+```
+
+```questdb-sql title="sha256() with UTF-8 input" demo
+SELECT sha256('Hello, world!');
+-- Returns: '315f5bdb76d078c43b8ac0064e4a0164612b1fce77c869345bfc94c75894edd3'
+```
+
+## Notes and restrictions
+
+### Input handling
+- All hash functions support string, varchar, and binary inputs
+- Empty strings produce a valid hash value
+- NULL inputs always return NULL outputs (see the example after this list)
+- UTF-8 strings are fully supported
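+
+As a quick sketch of the NULL behavior noted above (the explicit cast is only there to select the string overload for the bare NULL literal):
+
+```questdb-sql
+SELECT md5(CAST(NULL AS STRING));
+-- Returns: NULL
+```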
+
+### Thread safety
+- Hash functions are not thread-safe
+- Each function instance maintains its own internal state
+
+### Output characteristics
+- Output is always lowercase hexadecimal
+- Output length is fixed regardless of input size (see the query after this list):
+ - MD5: 32 characters
+ - SHA1: 40 characters
+ - SHA256: 64 characters
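+
+A small illustrative query to confirm the fixed lengths, assuming the standard `length()` string function:
+
+```questdb-sql
+SELECT
+  length(md5('abc'))    AS md5_len,    -- 32
+  length(sha1('abc'))   AS sha1_len,   -- 40
+  length(sha256('abc')) AS sha256_len; -- 64
+```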
+
+### Implementation details
+- Uses Java's built-in MessageDigest implementations
+- Supported algorithms are guaranteed to be available on all Java platforms
+- Processes input in a single pass
+
+### Common use cases
+
+#### Data integrity verification
+
+```questdb-sql
+SELECT
+ filename,
+ sha256(content) = expected_hash as is_valid
+FROM files;
+```
+
+#### Anonymizing sensitive data
+
+```questdb-sql
+SELECT
+ md5(email) as hashed_email,
+ count(*) as user_count
+FROM users
+GROUP BY hashed_email;
+```
+
+#### Binary data hashing
+
+```questdb-sql
+SELECT
+ file_id,
+ sha1(binary_content) as content_hash
+FROM binary_files;
+```
+
diff --git a/documentation/reference/function/window.md b/documentation/reference/function/window.md
index d07d6326..97a987f9 100644
--- a/documentation/reference/function/window.md
+++ b/documentation/reference/function/window.md
@@ -104,6 +104,10 @@ Where:
- [`first_value()`](#first_value) – Retrieves the first value in a window
+- [`max()`](#max) – Returns the maximum value within a window
+
+- [`min()`](#min) – Returns the minimum value within a window
+
- [`rank()`](#rank) – Assigns a rank to rows
- [`row_number()`](#row_number) – Assigns sequential numbers to rows
@@ -142,20 +146,19 @@ Window frames specify which rows are included in the calculation relative to the
```mermaid
sequenceDiagram
- participant CurrentRow as Current Row (Time 09:04)
- participant Row1 as Row at 09:00
- participant Row2 as Row at 09:02
- participant Row3 as Row at 09:03
- participant Row4 as Row at 09:04
+ participant R1 as Row at 09:00
+ participant R2 as Row at 09:02
+ participant R3 as Row at 09:03
+ participant R4 as Row at 09:04
(Current Row)
- Note over CurrentRow: Calculating at 09:04
+ Note over R4: Calculating at 09:04
rect rgb(191, 223, 255)
- Note over Row2,CurrentRow: ROWS BETWEEN 2 PRECEDING AND CURRENT ROW
+ Note over R2,R4: ROWS BETWEEN 2 PRECEDING AND CURRENT ROW
end
rect rgb(255, 223, 191)
- Note over Row3,CurrentRow: RANGE BETWEEN '1' MINUTE PRECEDING AND CURRENT ROW
+ Note over R3,R4: RANGE BETWEEN
'1' MINUTE PRECEDING
AND CURRENT ROW
end
```
@@ -187,17 +190,18 @@ sequenceDiagram
### RANGE frame
:::note
+RANGE functions have a known issue. When using RANGE, all the rows with the same value will have the same output for the function. Read the [open issue](https://github.com/questdb/questdb/issues/5177) for more information.
+:::
-RANGE functions have a known issue.
-
-When using RANGE, all the rows with the same value will have the same output for
-the function.
-
-Read the [open issue](https://github.com/questdb/questdb/issues/5177) for more information.
+Defines the frame based on the actual values in the ORDER BY column, rather than counting rows. Unlike ROWS, which counts a specific number of rows, RANGE considers the values in the ORDER BY column to determine the window.
-:::
+Important requirements for RANGE:
+- Data must be ordered by the designated timestamp column
+- The window is calculated based on the values in that ORDER BY column
-Defines the frame based on logical intervals of values in the ORDER BY column:
+For example, with a current row at 09:04 and `RANGE BETWEEN '1' MINUTE PRECEDING AND CURRENT ROW`:
+- Only includes rows with timestamps between 09:03 and 09:04 (inclusive)
+- Earlier rows (e.g., 09:00, 09:02) are excluded as they fall outside the 1-minute range
```mermaid
sequenceDiagram
@@ -206,12 +210,18 @@ sequenceDiagram
participant R3 as Row at 09:03
participant R4 as Row at 09:04
(Current Row)
+ Note over R4: Calculating at 09:04
+
+ %% Only include rows within 1 minute of current row (09:03-09:04)
rect rgba(255, 223, 191)
Note over R3,R4: RANGE BETWEEN
'1' MINUTE PRECEDING
AND CURRENT ROW
end
+
+ %% Show excluded rows in grey or with a visual indicator
+ Note over R1,R2: Outside 1-minute range
```
-The time units that can be used in window functions are:
+The following time units can be used in RANGE window functions:
- day
- hour
@@ -220,23 +230,23 @@ The time units that can be used in window functions are:
- millisecond
- microsecond
-Plural forms of these time units are also accepted.
+Plural forms of these time units are also accepted (e.g., 'minutes', 'hours').
```questdb-sql title="Multiple time intervals example" demo
SELECT
timestamp,
bid_px_00,
- -- 5-minute average
+ -- 5-minute average: includes rows from (current_timestamp - 5 minutes) to current_timestamp
AVG(bid_px_00) OVER (
ORDER BY timestamp
RANGE BETWEEN '5' MINUTE PRECEDING AND CURRENT ROW
) AS avg_5min,
- -- 100-millisecond count
+ -- 100ms count: includes rows from (current_timestamp - 100ms) to current_timestamp
COUNT(*) OVER (
ORDER BY timestamp
RANGE BETWEEN '100' MILLISECOND PRECEDING AND CURRENT ROW
) AS updates_100ms,
- -- 2-second sum
+ -- 2-second sum: includes rows from (current_timestamp - 2 seconds) to current_timestamp
SUM(bid_sz_00) OVER (
ORDER BY timestamp
RANGE BETWEEN '2' SECOND PRECEDING AND CURRENT ROW
@@ -251,6 +261,8 @@ This query demonstrates different time intervals in action, calculating:
- Update frequency in 100ms windows
- 2-second rolling volume
+Note that each window calculation is based on the timestamp values, not the number of rows. This means the number of rows included can vary depending on how many records exist within each time interval.
+
## Frame boundaries
Frame boundaries determine which rows are included in the window calculation:
@@ -512,6 +524,84 @@ SELECT
FROM trades;
```
+### max()
+
+In the context of window functions, `max(value)` calculates the maximum value within the set of rows defined by the window frame.
+
+**Arguments:**
+
+- `value`: Any numeric value.
+
+**Return value:**
+
+- The maximum value (excluding null) for the rows in the window frame.
+
+**Description**
+
+When used as a window function, `max()` operates on a "window" of rows defined by the `OVER` clause. The rows in this window are determined by the `PARTITION BY`, `ORDER BY`, and frame specification components of the `OVER` clause.
+
+The `max()` function respects the frame clause, meaning it only includes rows within the specified frame in the calculation. The result is a separate value for each row, based on the corresponding window of rows.
+
+Note that the order of rows in the result set is not guaranteed to be the same with each execution of the query. To ensure a consistent order, use an `ORDER BY` clause outside of the `OVER` clause.
+
+**Syntax:**
+```questdb-sql title="max() syntax"
+max(value) OVER (window_definition)
+```
+
+**Example:**
+```questdb-sql title="max() example" demo
+SELECT
+ symbol,
+ price,
+ timestamp,
+ max(price) OVER (
+ PARTITION BY symbol
+ ORDER BY timestamp
+ ROWS BETWEEN 3 PRECEDING AND CURRENT ROW
+ ) AS highest_price
+FROM trades;
+```
+
+### min()
+
+In the context of window functions, `min(value)` calculates the minimum value within the set of rows defined by the window frame.
+
+**Arguments:**
+
+- `value`: Any numeric value.
+
+**Return value:**
+
+- The minimum value (excluding null) for the rows in the window frame.
+
+**Description**
+
+When used as a window function, `min()` operates on a "window" of rows defined by the `OVER` clause. The rows in this window are determined by the `PARTITION BY`, `ORDER BY`, and frame specification components of the `OVER` clause.
+
+The `min()` function respects the frame clause, meaning it only includes rows within the specified frame in the calculation. The result is a separate value for each row, based on the corresponding window of rows.
+
+Note that the order of rows in the result set is not guaranteed to be the same with each execution of the query. To ensure a consistent order, use an `ORDER BY` clause outside of the `OVER` clause.
+
+**Syntax:**
+```questdb-sql title="min() syntax"
+min(value) OVER (window_definition)
+```
+
+**Example:**
+```questdb-sql title="min() example" demo
+SELECT
+ symbol,
+ price,
+ timestamp,
+ min(price) OVER (
+ PARTITION BY symbol
+ ORDER BY timestamp
+ ROWS BETWEEN 3 PRECEDING AND CURRENT ROW
+ ) AS lowest_price
+FROM trades;
+```
+
### rank()
In the context of window functions, `rank()` assigns a unique rank to each row
@@ -777,7 +867,7 @@ This example:
- Can be used with any ORDER BY column
- RANGE frames:
- - Based on logical intervals
+  - Defines the frame based on the actual values in the ORDER BY column, rather than a count of rows.
- Require ORDER BY on timestamp
- Support time-based intervals (e.g., '1h', '5m')
diff --git a/documentation/reference/sql/acl/create-service-account.md b/documentation/reference/sql/acl/create-service-account.md
index 7c38d074..2ccb0acc 100644
--- a/documentation/reference/sql/acl/create-service-account.md
+++ b/documentation/reference/sql/acl/create-service-account.md
@@ -39,9 +39,12 @@ Note that new service accounts can only access the database if the necessary
granted.
The user creating the service account automatically receives the
-`ASSUME SERVICE ACCOUNT` permission, unless the `OWNED BY` clause is present, in
-which case the permission is granted to the user or group specified in the
-clause.
+`ASSUME SERVICE ACCOUNT` permission with `GRANT` option, unless the `OWNED BY`
+clause is present, in which case the permission is granted to the user or
+group specified in the clause.
+
+The `OWNED BY` clause cannot be omitted if the service account is created by
+an external user, because permissions cannot be granted to them.
## Examples
diff --git a/documentation/reference/sql/alter-table-add-column.md b/documentation/reference/sql/alter-table-add-column.md
index 5049b011..62ffea94 100644
--- a/documentation/reference/sql/alter-table-add-column.md
+++ b/documentation/reference/sql/alter-table-add-column.md
@@ -29,13 +29,17 @@ first failure. It is therefore possible to add some columns and not others.
_Enterprise only._
-When a user adds a column to a table, they automatically get permissions for
-that column. However, if the `OWNED BY` clause is used, the permissions instead
-go to the user, group, or service account named in that clause.
+When a user adds a new column to a table, they automatically get all column
+level permissions with the `GRANT` option for that column.
+However, if the `OWNED BY` clause is used, the permissions instead go to the
+user, group, or service account named in that clause.
+
+The `OWNED BY` clause cannot be omitted if the column is added by an external
+user, because permissions cannot be granted to them.
## Examples
-Add a new column called `comment` of type `STRING` type to the table `ratings`
+Add a new column called `comment` of `STRING` type to the table `ratings`
```questdb-sql title="New column"
ALTER TABLE ratings ADD COLUMN comment STRING;
diff --git a/documentation/reference/sql/create-table.md b/documentation/reference/sql/create-table.md
index b4db4d29..947a47b0 100644
--- a/documentation/reference/sql/create-table.md
+++ b/documentation/reference/sql/create-table.md
@@ -307,9 +307,13 @@ information about indexes.
_Enterprise only._
-When a user adds a column to a table, they automatically get permissions for
-that column. However, if the `OWNED BY` clause is used, the permissions instead
-go to the user, group, or service account named in that clause.
+When a user creates a new table, they automatically get all table level
+permissions with the `GRANT` option for that table.
+However, if the `OWNED BY` clause is used, the permissions instead go
+to the user, group, or service account named in that clause.
+
+The `OWNED BY` clause cannot be omitted if the table is created by an
+external user, because permissions cannot be granted to them.
```questdb-sql
CREATE GROUP analysts;
diff --git a/documentation/sidebars.js b/documentation/sidebars.js
index 04fc5367..b06ba859 100644
--- a/documentation/sidebars.js
+++ b/documentation/sidebars.js
@@ -80,14 +80,7 @@ module.exports = {
label: "Message Brokers",
collapsed: false,
items: [
- {
- label: "Kafka",
- type: "category",
- items: [
- "third-party-tools/kafka/overview",
- "third-party-tools/kafka/questdb-kafka",
- ],
- },
+ "third-party-tools/kafka",
"third-party-tools/telegraf",
"third-party-tools/redpanda",
"third-party-tools/flink",
@@ -274,6 +267,7 @@ module.exports = {
"reference/function/conditional",
"reference/function/date-time",
"reference/function/finance",
+ "reference/function/hash",
"reference/function/meta",
"reference/function/numeric",
"reference/function/parquet",
@@ -442,30 +436,36 @@ module.exports = {
type: "doc",
id: "third-party-tools/overview",
},
- "third-party-tools/airbyte",
- "third-party-tools/cube",
- "third-party-tools/databento",
- "third-party-tools/embeddable",
- "third-party-tools/flink",
- "third-party-tools/grafana",
{
- label: "Kafka",
type: "category",
+ label: "Recommended",
+ collapsed: false,
items: [
- "third-party-tools/kafka/overview",
- "third-party-tools/kafka/questdb-kafka",
+ "third-party-tools/databento",
+ "third-party-tools/grafana",
+ "third-party-tools/kafka",
+ "third-party-tools/redpanda",
+ "third-party-tools/pandas",
+ "third-party-tools/prometheus",
+ "third-party-tools/qstudio",
+ "third-party-tools/superset",
+ "third-party-tools/flink",
+ ],
+ },
+ {
+ label: "Other tools",
+ type: "category",
+ collapsed: false,
+ items: [
+ "third-party-tools/sqlalchemy",
+ "third-party-tools/telegraf",
+ "third-party-tools/mindsdb",
+ "third-party-tools/spark",
+ "third-party-tools/cube",
+ "third-party-tools/embeddable",
+ "third-party-tools/airbyte",
],
},
- "third-party-tools/mindsdb",
- "third-party-tools/pandas",
- "third-party-tools/prometheus",
- "third-party-tools/qstudio",
- "third-party-tools/redpanda",
- "third-party-tools/redpanda-connect",
- "third-party-tools/spark",
- "third-party-tools/sqlalchemy",
- "third-party-tools/superset",
- "third-party-tools/telegraf",
],
},
{
diff --git a/documentation/third-party-tools/kafka/questdb-kafka.md b/documentation/third-party-tools/kafka.md
similarity index 89%
rename from documentation/third-party-tools/kafka/questdb-kafka.md
rename to documentation/third-party-tools/kafka.md
index a67f904c..95c21283 100644
--- a/documentation/third-party-tools/kafka/questdb-kafka.md
+++ b/documentation/third-party-tools/kafka.md
@@ -1,16 +1,72 @@
---
-title: QuestDB Kafka Connector
-description:
- QuestDB ships a QuestDB Kafka connector for ingesting messages from Kafka via
- the InfluxDB Line Protocol.
+title: Ingestion from Kafka Overview
+sidebar_label: Kafka
+description: Apache Kafka and QuestDB Kafka Connector overview and guide. Thorough explanations and examples.
---
-import Screenshot from "@theme/Screenshot"
-
-QuestDB ships a
-[QuestDB Kafka connector](https://github.com/questdb/kafka-questdb-connector)
-for fast ingestion from Kafka into QuestDB. This is also useful for processing
-[change data capture](/glossary/change-data-capture/) for the dataflow. The
+Kafka is a fault-tolerant message broker that excels at streaming. Its ecosystem
+provides tooling which - given the popularity of Kafka - can be used in
+alternative services and tools like Redpanda, similar to how QuestDB supports
+the InfluxDB Line Protocol.
+
+There are three main ways to get data from Kafka into QuestDB:
+
+1. Apply the Kafka Connect based
+ [QuestDB Kafka connector](#questdb-kafka-connect-connector)
+ - **Recommended for most people!**
+2. Write a
+ [custom program](#customized-program)
+ to read data from Apache Kafka and write to QuestDB
+3. Use a
+ [stream processing](#stream-processing)
+ engine
+
+Each strategy has different trade-offs.
+
+The rest of this section discusses each strategy and guides users who are
+already familiar with the Kafka ecosystem.
+
+## Customized program
+
+Writing a dedicated program reading from Kafka topics and writing to QuestDB
+tables offers great flexibility. The program can do arbitrary data
+transformations and filtering, including stateful operations.
+
+On the other hand, it's the most complex strategy to implement. You'll have to
+deal with different serialization formats, handle failures, etc. This strategy
+is recommended for very advanced use cases only.
+
+_Not recommended for most people._
+
+## Stream processing
+
+[Stream processing](/glossary/stream-processing/) engines provide a middle
+ground between writing a dedicated program and using one of the connectors.
+Engines such as [Apache Flink](https://flink.apache.org/) provide rich API for
+data transformations, enrichment, and filtering; at the same time, they can help
+you with shared concerns such as fault-tolerance and serialization. However,
+they often have a non-trivial learning curve.
+
+QuestDB offers a [connector for Apache Flink](/docs/third-party-tools/flink/).
+It is the recommended strategy if you are an existing Flink user, and you need
+to do complex transformations while inserting entries from Kafka into QuestDB.
+
+## QuestDB Kafka Connect connector
+
+**Recommended for most people!**
+
+QuestDB develops a first-party
+[QuestDB Kafka connector](https://github.com/questdb/kafka-questdb-connector). The
+connector is built on top of the
+[Kafka Connect framework](https://docs.confluent.io/platform/current/connect/index.html)
+and uses the InfluxDB Line Protocol for communication with QuestDB. Kafka
+Connect handles concerns such as fault tolerance and serialization. It also
+provides facilities for message transformations, filtering and so on. This is also useful
+for processing [change data capture](/glossary/change-data-capture/) for the dataflow.
+
+The underlying InfluxDB Line Protocol ensures operational simplicity and
+excellent performance. It can comfortably insert hundreds of thousands of rows per
+second. Leveraging Kafka Connect also allows QuestDB to connect with
+Kafka-compatible applications like
+[Redpanda](/docs/third-party-tools/redpanda/). The
connector is based on the
[Kafka Connect framework](https://kafka.apache.org/documentation/#connect) and
acts as a sink for Kafka topics.
@@ -21,7 +77,7 @@ This page has the following main sections:
- [Connector Configuration manual](#configuration-manual)
- [FAQ](#faq)
-## Integration guide
+### Integration guide
This guide shows the steps to use the QuestDB Kafka connector to read JSON data
from Kafka topics and write them as rows into a QuestDB table. For Confluent
@@ -172,7 +228,7 @@ It includes a
[sample integration](https://github.com/questdb/kafka-questdb-connector/tree/main/kafka-questdb-connector-samples/stocks)
with [Debezium](https://debezium.io/) for CDC from PostgreSQL.
-## Configuration manual
+### Configuration manual
This section lists configuration options as well as further information about
the Kafka Connect connector.
@@ -398,7 +454,6 @@ Note that deduplication requires designated timestamps extracted either from
message payload or Kafka message metadata. See the
[Designated timestamps](#designated-timestamps) section for more information.
-
#### Dead Letter Queue
When messages cannot be processed due to non-recoverable errors, such as invalid data formats or schema mismatches, the
@@ -543,7 +598,7 @@ In production, it's recommended to use the SQL
because it gives you more control over the table schema, allowing per-table
[partitioning](/glossary/database-partitioning/), creating indexes, etc.
-## FAQ
+### FAQ
Does this connector work with Schema Registry?
@@ -654,7 +709,7 @@ issues. If you do, please report them to us.
-## See also
+### See also
- [Change Data Capture with QuestDB and Debezium](/blog/2023/01/03/change-data-capture-with-questdb-and-debezium)
- [Realtime crypto tracker with QuestDB Kafka Connector](/blog/realtime-crypto-tracker-with-questdb-kafka-connector)
diff --git a/documentation/third-party-tools/kafka/overview.md b/documentation/third-party-tools/kafka/overview.md
deleted file mode 100644
index 92a320c8..00000000
--- a/documentation/third-party-tools/kafka/overview.md
+++ /dev/null
@@ -1,70 +0,0 @@
----
-title: Ingestion from Kafka Overview
-sidebar_label: Overview
-description: Apache Kafka integration overview.
----
-
-Kafka is a fault-tolerant message broker that excels at streaming. Its ecosystem
-provides tooling which - given the popularity of Kafka - can be used in
-alternative services and tools like Redpanda, similar to how QuestDB supports
-the InfluxDB Line Protocol.
-
-1. Apply the Kafka Connect based
- [QuestDB Kafka connector](/docs/third-party-tools/kafka/overview/#questdb-connector)
-2. Write a
- [custom program](/docs/third-party-tools/kafka/overview/#customized-program)
- to read data from Apache Kafka and write to QuestDB
-3. Use a
- [stream processing](/docs/third-party-tools/kafka/overview/#stream-processing)
- engine
-
-Each strategy has different trade-offs.
-
-The rest of this section discusses each strategy and guides users who are
-already familiar with the Kafka ecosystem.
-
-### QuestDB Kafka Connect connector
-
-**Recommended for most people!**
-
-QuestDB develops a first-party
-[QuestDB Kafka connector](/docs/third-party-tools/kafka/questdb-kafka/). The
-connector is built on top of the
-[Kafka Connect framework](https://docs.confluent.io/platform/current/connect/index.html)
-and uses the InfluxDB Line Protocol for communication with QuestDB. Kafka
-Connect handles concerns such as fault tolerance and serialization. It also
-provides facilities for message transformations, filtering and so on.
-
-The underlying InfluxDB Line Protocol ensures operational simplicity and
-excellent performance. It can comfortably insert over 100,000s of rows per
-second. Leveraging Apache Connect also allows QuestDB to connect with
-Kafka-compatible applications like
-[Redpanda](/docs/third-party-tools/redpanda/).
-
-Read [our QuestDB Kafka connector](/docs/third-party-tools/kafka/questdb-kafka/)
-guide to get started.
-
-### Customized program
-
-Writing a dedicated program reading from Kafka topics and writing to QuestDB
-tables offers great flexibility. The program can do arbitrary data
-transformations and filtering, including stateful operations.
-
-On the other hand, it's the most complex strategy to implement. You'll have to
-deal with different serialization formats, handle failures, etc. This strategy
-is recommended for very advanced use cases only.
-
-_Not recommended for most people._
-
-### Stream processing
-
-[Stream processing](/glossary/stream-processing/) engines provide a middle
-ground between writing a dedicated program and using one of the connectors.
-Engines such as [Apache Flink](https://flink.apache.org/) provide rich API for
-data transformations, enrichment, and filtering; at the same time, they can help
-you with shared concerns such as fault-tolerance and serialization. However,
-they often have a non-trivial learning curve.
-
-QuestDB offers a [connector for Apache Flink](/docs/third-party-tools/flink/).
-It is the recommended strategy if you are an existing Flink user, and you need
-to do complex transformations while inserting entries from Kafka into QuestDB.
diff --git a/documentation/third-party-tools/overview.md b/documentation/third-party-tools/overview.md
index 4199d034..58e23a52 100644
--- a/documentation/third-party-tools/overview.md
+++ b/documentation/third-party-tools/overview.md
@@ -25,7 +25,7 @@ platforms:
Ingest, store, and process high-throughput and real-time data streams with these
integrations:
-- **[Apache Kafka](/docs/third-party-tools/kafka/overview/):** A distributed
+- **[Apache Kafka](/docs/third-party-tools/kafka):** A distributed
event streaming platform for high-throughput data pipelines.
- [Telegraf](/docs/third-party-tools/telegraf/): Collect and report metrics from
various sources.
diff --git a/documentation/third-party-tools/redpanda-connect.md b/documentation/third-party-tools/redpanda-connect.md
deleted file mode 100644
index 39dcbddb..00000000
--- a/documentation/third-party-tools/redpanda-connect.md
+++ /dev/null
@@ -1,77 +0,0 @@
----
-title: Redpanda Connect (Benthos)
-description: Redpanda Connect, formerly known as Benthos, ships with a QuestDB
- output component that can be used as a sink for your stream processing data
----
-
-import Screenshot from "@theme/Screenshot"
-
-## Integration guide
-
-Redpanda Connect is a stream processing tool that can be used to build data pipelines.
-It's a lightweight alternative to [Apache Kafka Connect](/docs/third-party-tools/kafka/questdb-kafka/).
-This guide shows the steps to use the Redpanda Connect to write JSON data
-as rows into a QuestDB table.
-
-### Prerequisites
-
-You will need the following:
-
-- [Redpanda Connect](https://docs.redpanda.com/redpanda-connect/about/)
-- A running QuestDB instance
-
-### Download Redpanda Connect
-
-The QuestDB output component was added to Redpanda Connect in version v4.37.0.
-
-To download the latest version of Redpanda Connect, follow the [installation instructions](https://docs.redpanda.com/redpanda-connect/guides/getting_started/#install) in the official documentation.
-
-### Configure Redpanda Connect
-
-One of Redpanda Connect's strengths is the ability to configure an entire data pipeline in a single
-yaml file. We will create a simple configuration to demonstrate the QuestDB connector's capabilities
-by using a straightforward input source.
-
-Create this file and name it `config.yaml` in your current directory
-
-```yaml
-input:
- stdin: {}
-
-output:
- questdb:
- address: localhost:9000
- table: redpanda_connect_demo
- doubles:
- - price
- designated_timestamp_field: timestamp
-```
-
-This configuration will read lines from stdin and publish them to your running QuestDB instance
-
-### Run Redpanda Connect and publish messages
-
-Run the following command to send some messages to QuestDB through Redpanda Connect
-
-```bash
-echo \
-'{"symbol": "AAPL", "price": 225.83, "timestamp": 1727294094}
-{"symbol": "MSFT", "price": 431.78, "timestamp": 1727294142}' \
-| rpk connect run config.yaml
-```
-
-The command above sends two JSON messages to Redpanda Connect standard input, which then writes them to QuestDB.
-
-### Verify the integration
-
-Navigate to the QuestDB Web Console at http://localhost:9000 and run the following query to see your data:
-
-```sql
-SELECT *
-FROM redpanda_connect_demo
-```
-
-### Next steps
-
-Explore Redpanda Connect's [official documentation](https://docs.redpanda.com/redpanda-connect/about/) to learn more
-about its capabilities and how to use it in your projects.
diff --git a/documentation/third-party-tools/redpanda.md b/documentation/third-party-tools/redpanda.md
index 42dee998..94a759b3 100644
--- a/documentation/third-party-tools/redpanda.md
+++ b/documentation/third-party-tools/redpanda.md
@@ -1,22 +1,27 @@
---
title: Redpanda
description:
- Guide for using Redpanda with QuestDB via the QuestDB Kafka connector
+ Guide for using Redpanda with QuestDB via the QuestDB Kafka connector. Also
+ covers Redpanda Connect, a stream processing tool that can be used to build
+ data pipelines.
---
[Redpanda](https://redpanda.com/) is an open-source, Kafka-compatible streaming
platform that uses C++ and Raft to replace Java and Zookeeper. Since it is Kafka
compatible, it can be used with the
-[QuestDB Kafka connector](/docs/third-party-tools/kafka/questdb-kafka/),
+[QuestDB Kafka connector](/docs/third-party-tools/kafka/#questdb-kafka-connect-connector),
providing an alternative data [streaming](/glossary/stream-processing) option.
-## Prerequisites
+This guide also covers [Redpanda Connect](#redpanda-connect), a stream processing
+tool that can be used to build data pipelines.
+
+### Prerequisites
- Docker
- A local JDK installation
- A running QuestDB instance
-## Configure and start Redpanda
+### Configure and start Redpanda
The Redpanda
[Quick start guide](https://docs.redpanda.com/docs/get-started/quick-start/quick-start-docker/#start-redpanda)
@@ -118,7 +123,7 @@ docker compose up
It also start the
[Redpanda web UI](https://docs.redpanda.com/docs/get-started/quick-start/quick-start-docker/#explore-your-topic-in-redpanda-console).
-## Download Apache Kafka
+### Download Apache Kafka
Download
[Apache Kafka](https://downloads.apache.org/kafka/3.7.0/kafka_2.12-3.7.0.tgz)
@@ -127,7 +132,7 @@ and unzip the file.
This step is required as Redpanda does not have its own Kafka Connect
equivalent.
-## Download the QuestDB Kafka connector
+### Download the QuestDB Kafka connector
Download
[the QuestDB Kafka connector](https://github.com/questdb/kafka-questdb-connector/releases/latest),
@@ -158,7 +163,7 @@ cp ./*.jar /path/to/kafka/libs
There should be already a lot of other JAR files. That's how you can tell you
are in the right directory.
-## Configure properties
+### Configure properties
Go to /path/to/kafka/config - there should be already quite a few \*.property
files. Create a new file: `questdb-connector.properties` with the following
@@ -189,7 +194,7 @@ with the Redpanda broker URL:
bootstrap.servers=127.0.0.1:19092
```
-## Start Kafka Connect
+### Start Kafka Connect
Navigate to the Kafka Connect folder and then run:
@@ -199,7 +204,7 @@ Navigate to the Kafka Connect folder and then run:
Now the Kafka Connect is initiated.
-## Send a message
+### Send a message
Open the [Redpanda UI topic page](http://127.0.0.1:8080/topics). It should
display `example-topic`:
@@ -223,7 +228,7 @@ Paste the following message into the message box:
Then, click 'Publish'.
-## See result from QuestDB
+### See result from QuestDB
Go to QuestDB web console at [http://localhost:9000](http://localhost:9000). Run
a `SELECT` query:
@@ -236,7 +241,7 @@ The message is delivered to QuestDB:
![QuestDB web console result showing the Redpanda message](/images/docs/guide/redpanda/questdb-select.webp)
-## Summary and next steps
+### Summary and next steps
The guide demonstrates how to use Redpanda with the QuestDB Kafka connector. The
connector implicitly creates a table in QuestDB with inferred schema from the
@@ -248,7 +253,7 @@ from the Kafka message metadata.
The connector can be also configured to use a custom timestamp field from the
Kafka message. See the
-[QuestDB Kafka Connector reference manual](/docs/third-party-tools/kafka/questdb-kafka#designated-timestamps)
+[QuestDB Kafka Connector reference manual](/docs/third-party-tools/kafka/#designated-timestamps)
for details.
A possible improvement could be to explicitly create the target table in QuestDB
@@ -256,10 +261,80 @@ instead of relying on the connector to create it implicitly. This way, you can
control the schema, [partitioning](/glossary/database-partitioning/) and data
types of the table. It also enables QuestDB's native
[deduplication feature](/docs/concept/deduplication). Deduplication is required
-for [Exactly-Once](/docs/third-party-tools/kafka/questdb-kafka#fault-tolerance)
+for [Exactly-Once](/docs/third-party-tools/kafka/#fault-tolerance)
processing semantics.
## See also
-- [QuestDB Kafka Connector reference manual](/docs/third-party-tools/kafka/questdb-kafka/#configuration-manual)
+- [QuestDB Kafka Connector reference manual](/docs/third-party-tools/kafka/#configuration-manual)
- [How to build a real-time crypto tracker with Redpanda and QuestDB](https://redpanda.com/blog/real-time-crypto-tracker-questdb-redpanda)
+
+## Redpanda Connect
+
+Redpanda Connect is a stream processing tool that can be used to build data pipelines.
+It's a lightweight alternative to [Apache Kafka Connect](/docs/third-party-tools/kafka/#questdb-kafka-connect-connector).
+This guide shows the steps to use Redpanda Connect to write JSON data
+as rows into a QuestDB table.
+
+### Prerequisites
+
+You will need the following:
+
+- [Redpanda Connect](https://docs.redpanda.com/redpanda-connect/about/)
+- A running QuestDB instance
+
+### Download Redpanda Connect
+
+The QuestDB output component was added to Redpanda Connect in version v4.37.0.
+
+To download the latest version of Redpanda Connect, follow the [installation instructions](https://docs.redpanda.com/redpanda-connect/guides/getting_started/#install) in the official documentation.
+
+### Configure Redpanda Connect
+
+One of Redpanda Connect's strengths is the ability to configure an entire data pipeline in a single
+yaml file. We will create a simple configuration to demonstrate the QuestDB connector's capabilities
+by using a straightforward input source.
+
+Create a file named `config.yaml` in your current directory:
+
+```yaml
+input:
+ stdin: {}
+
+output:
+ questdb:
+ address: localhost:9000
+ table: redpanda_connect_demo
+ doubles:
+ - price
+ designated_timestamp_field: timestamp
+```
+
+This configuration reads lines from stdin and publishes them to your running QuestDB instance.
+
+### Run Redpanda Connect and publish messages
+
+Run the following command to send some messages to QuestDB through Redpanda Connect:
+
+```bash
+echo \
+'{"symbol": "AAPL", "price": 225.83, "timestamp": 1727294094}
+{"symbol": "MSFT", "price": 431.78, "timestamp": 1727294142}' \
+| rpk connect run config.yaml
+```
+
+The command above sends two JSON messages to Redpanda Connect standard input, which then writes them to QuestDB.
+
+### Verify the integration
+
+Navigate to the QuestDB Web Console at http://localhost:9000 and run the following query to see your data:
+
+```sql
+SELECT *
+FROM redpanda_connect_demo
+```
+
+### Next steps
+
+Explore Redpanda Connect's [official documentation](https://docs.redpanda.com/redpanda-connect/about/) to learn more
+about its capabilities and how to use it in your projects.
diff --git a/docusaurus.config.js b/docusaurus.config.js
index 9088c50f..640f3b36 100644
--- a/docusaurus.config.js
+++ b/docusaurus.config.js
@@ -153,6 +153,7 @@ const config = {
// Disable /search page
searchPagePath: false,
contextualSearch: false,
+ externalUrlRegex: 'questdb\\.io',
},
}
: {}),
diff --git a/netlify.toml b/netlify.toml
index b1a41866..4bfe76b0 100644
--- a/netlify.toml
+++ b/netlify.toml
@@ -423,12 +423,16 @@ from = "/docs/reference/sql/timestamp"
to = "/docs/reference/function/timestamp/"
[[redirects]]
-from = "/docs/third-party-tools/kafka/"
-to = "/docs/third-party-tools/kafka/overview/"
+from = "/docs/third-party-tools/kafka/overview/"
+to = "/docs/third-party-tools/kafka/"
+
+[[redirects]]
+from = "/docs/third-party-tools/kafka/questdb-kafka/"
+to = "/docs/third-party-tools/kafka/#questdb-kafka-connect-connector"
[[redirects]]
from = "/docs/third-party-tools/kafka/jdbc"
-to = "/docs/third-party-tools/kafka/overview/"
+to = "/docs/third-party-tools/kafka/"
[[redirects]]
from = "/blog/laoding-pandas-dataframes/"
diff --git a/package.json b/package.json
index 0ce783ff..c9eb31f5 100644
--- a/package.json
+++ b/package.json
@@ -8,7 +8,8 @@
"prebuild": "docusaurus clear && node ./scripts/cleanup-guidelines",
"build": "cross-env NO_UPDATE_NOTIFIER=true USE_SIMPLE_CSS_MINIFIER=true PWA_SW_CUSTOM= docusaurus build",
"deploy": "docusaurus deploy",
- "serve": "docusaurus serve"
+ "serve": "docusaurus serve",
+ "swizzle": "docusaurus swizzle"
},
"dependencies": {
"@docusaurus/faster": "^3.6.3",
diff --git a/scripts/railroad.py b/scripts/railroad.py
index b4733ec0..f1961103 100644
--- a/scripts/railroad.py
+++ b/scripts/railroad.py
@@ -2,6 +2,7 @@
import subprocess
import re
from pathlib import Path
+import argparse
PROJECT_ROOT = Path(os.getcwd())
RR_WAR_PATH = PROJECT_ROOT / "rr.war"
@@ -53,37 +54,46 @@ def extract_diagrams(file_path):
print(f"Reading from file: {file_path}")
with open(file_path, 'r') as f:
- previous_line = ""
for line in f:
- line = line.rstrip()
+ line = line.rstrip()
+ # Skip empty lines and comments
if not line or line.startswith('#'):
- previous_line = ""
continue
- if '::=' in line:
- if previous_line and not previous_line.startswith('-'):
- if current_name and current_definition:
- diagrams[current_name] = '\n'.join(current_definition)
-
- current_name = previous_line.strip()
- current_definition = [f"{current_name} {line.strip()}"]
- print(f"Found diagram: {current_name}")
+ # If we find a new definition (non-indented line without ::=)
+ if not line.startswith(' ') and '::=' not in line:
+ # Save previous diagram if it exists
+ if current_name and current_definition:
+ # Format the definition properly
+ formatted_def = []
+ for def_line in current_definition:
+ if '::=' in def_line:
+ # First line needs the name
+ formatted_def.append(f"{current_name} {def_line.strip()}")
+ else:
+ formatted_def.append(def_line.strip())
+ diagrams[current_name] = '\n'.join(formatted_def)
- elif current_name and line:
- current_definition.append(line)
+ # Start new diagram
+ current_name = line.strip()
+ current_definition = []
+ continue
- previous_line = line
-
- if current_name and current_definition:
- diagrams[current_name] = '\n'.join(current_definition)
-
- print(f"\nFound {len(diagrams)} diagrams: {sorted(diagrams.keys())}")
+ # Add definition lines to current diagram
+ if current_name and line:
+ current_definition.append(line)
- if diagrams:
- first_key = sorted(diagrams.keys())[0]
- print(f"\nFirst diagram '{first_key}' content:")
- print(diagrams[first_key])
+ # Save the last diagram
+ if current_name and current_definition:
+ formatted_def = []
+ for def_line in current_definition:
+ if '::=' in def_line:
+ # First line needs the name
+ formatted_def.append(f"{current_name} {def_line.strip()}")
+ else:
+ formatted_def.append(def_line.strip())
+ diagrams[current_name] = '\n'.join(formatted_def)
return diagrams
@@ -171,34 +181,48 @@ def inject_custom_style(svg_path):
f.write(final_svg)
def main():
+ # Add argument parsing
+ parser = argparse.ArgumentParser(description='Generate railroad diagrams')
+ parser.add_argument('diagram_name', nargs='?', help='Optional specific diagram name to generate')
+ args = parser.parse_args()
+
temp_dir = PROJECT_ROOT / "temp_grammar"
temp_dir.mkdir(exist_ok=True)
print(f"Created temp directory: {temp_dir}")
markdown_syntax_list = []
-
+ processed_diagrams = set()
+ orphaned_diagrams = []
+
try:
diagrams = extract_diagrams(INPUT_FILE)
+ if args.diagram_name:
+ if args.diagram_name not in diagrams:
+ print(f"Error: Diagram '{args.diagram_name}' not found in .railroad file")
+ return
+ # Process only the specified diagram
+ diagrams = {args.diagram_name: diagrams[args.diagram_name]}
+
for name, definition in diagrams.items():
print(f"\nProcessing diagram: {name}")
+ processed_diagrams.add(name)
output_path = OUTPUT_DIR / f"{name}.svg"
- if output_path.exists():
- print(f"Skipping existing diagram: {name}")
- continue
-
try:
svg_path = generate_svg(name, definition, temp_dir)
-
inject_custom_style(svg_path)
-
print(f"Successfully generated: {name}.svg")
-
markdown_syntax_list.append(f"![Diagram for {name}](/images/docs/diagrams/{name}.svg)")
-
except Exception as e:
print(f"Error processing {name}: {str(e)}")
+
+ # Only check for orphaned diagrams if we're processing all diagrams
+ if not args.diagram_name:
+ for svg_file in OUTPUT_DIR.glob("*.svg"):
+ diagram_name = svg_file.stem
+ if diagram_name not in processed_diagrams:
+ orphaned_diagrams.append(diagram_name)
finally:
print("\nCleaning up...")
@@ -212,6 +236,11 @@ def main():
print("\nCopy the image syntax below and paste it into your markdown file:")
for syntax in markdown_syntax_list:
print(syntax)
+
+ if orphaned_diagrams:
+ print("\nFound orphaned diagrams (these exist as SVGs but have no matching syntax):")
+ for diagram in sorted(orphaned_diagrams):
+ print(f"- {diagram}")
if __name__ == "__main__":
main()
\ No newline at end of file
diff --git a/src/theme/SearchBar.tsx b/src/theme/SearchBar.tsx
new file mode 100644
index 00000000..374fc893
--- /dev/null
+++ b/src/theme/SearchBar.tsx
@@ -0,0 +1,45 @@
+import { useEffect } from 'react';
+import SearchBar from '@theme-original/SearchBar';
+import type SearchBarType from '@theme/SearchBar';
+import type {WrapperProps} from '@docusaurus/types';
+
+type Props = WrapperProps<typeof SearchBarType>;
+
+// Not anyone's favourite, but this forces
+// search result links to open in a new tab
+
+export default function SearchBarWrapper(props: Props): JSX.Element {
+ useEffect(() => {
+ const handleSearchResults = () => {
+ const searchResults = document.querySelector('.DocSearch-content');
+ if (searchResults) {
+ const links = searchResults.getElementsByTagName('a');
+ Array.from(links).forEach(link => {
+ link.setAttribute('target', '_blank');
+ link.setAttribute('rel', 'noopener noreferrer');
+ });
+ }
+ };
+
+ const observer = new MutationObserver((mutations) => {
+ mutations.forEach((mutation) => {
+ if (mutation.addedNodes.length) {
+ handleSearchResults();
+ }
+ });
+ });
+
+ observer.observe(document.body, {
+ childList: true,
+ subtree: true,
+ });
+
+ return () => observer.disconnect();
+ }, []);
+
+ return (
+ <>
+      <SearchBar {...props} />
+    </>
+ );
+}
diff --git a/static/images/docs/diagrams/.railroad b/static/images/docs/diagrams/.railroad
index 402984c7..96f89714 100644
--- a/static/images/docs/diagrams/.railroad
+++ b/static/images/docs/diagrams/.railroad
@@ -319,17 +319,17 @@ update
(FROM 'joinTable1' (JOIN 'joinTable2' ON 'joinCondition2')? )? (WHERE 'filter')?
show
- ::= 'SHOW' ( ('TABLES'|('COLUMNS'|'PARTITIONS') 'FROM' tableName ) | 'USER' userName? | 'USERS' | 'GROUPS' userName? | 'SERVICE' 'ACCOUNT' accountName? | 'SERVICE' 'ACCOUNTS' userName? | 'PERMISSIONS' entityName? | 'SERVER_VERSION' | 'SERVER_CONF' )
-
- - `SHOW USER` shows user secret (enterprise-only)
- - `SHOW USERS` shows all users (enterprise-only)
- - `SHOW GROUPS` shows all groups the user belongs or all groups in the system (enterprise-only)
- - `SHOW SERVER_VERSION` displays PostgreSQL compatibility version
- - `SHOW SERVICE ACCOUNTS` displays all service accounts or those assigned to the user/group (enterprise-only)
- - `SHOW SERVICE ACCOUNT` displays details of a service account (enterprise-only)
- - `SHOW PERMISSIONS` displays permissions of user, group or service account (enterprise-only)
- - `SHOW SERVER_CONF` shows the content of QuestDB's server.conf configuration file. (enterprise-only)
-
+ ::= 'SHOW' ( ('TABLES'|('COLUMNS'|'PARTITIONS') 'FROM' tableName )
+ | 'CREATE' 'TABLE' tableName
+ | 'USER' userName?
+ | 'USERS'
+ | 'GROUPS' userName?
+ | 'SERVICE' 'ACCOUNT' accountName?
+ | 'SERVICE' 'ACCOUNTS' userName?
+ | 'PERMISSIONS' entityName?
+ | 'SERVER_VERSION'
+ | 'SERVER_CONF'
+ )
truncateTable
::= 'TRUNCATE TABLE' tableName ';'