diff --git a/examples/linearlite/.gitignore b/examples/linearlite/.gitignore
new file mode 100644
index 0000000000..c05ac13594
--- /dev/null
+++ b/examples/linearlite/.gitignore
@@ -0,0 +1,3 @@
+dist
+.env.local
+db/data/
\ No newline at end of file
diff --git a/examples/linearlite/.prettierrc b/examples/linearlite/.prettierrc
new file mode 100644
index 0000000000..f685078fff
--- /dev/null
+++ b/examples/linearlite/.prettierrc
@@ -0,0 +1,6 @@
+{
+  "trailingComma": "es5",
+  "semi": false,
+  "tabWidth": 2,
+  "singleQuote": true
+}
diff --git a/examples/linearlite/README.md b/examples/linearlite/README.md
new file mode 100644
index 0000000000..1bc029139c
--- /dev/null
+++ b/examples/linearlite/README.md
@@ -0,0 +1,76 @@
+# Linearlite + PGlite + ElectricSQL
+
+This is a demo app that shows how to build a local-first app using PGlite and the ElectricSQL sync engine.
+
+It's an example of a team collaboration app such as Linear, built using ElectricSQL - a sync engine that synchronises little subsets of your Postgres data into local apps and services. So you can have the data you need, in sync, wherever you need it.
+
+It's built on top of the excellent clone of the Linear UI created by [Tuan Nguyen](https://github.com/tuan3w).
+
+## Setup
+
+This example is part of the [ElectricSQL monorepo](../..) and is designed to be built and run as part of the [pnpm workspace](https://pnpm.io/workspaces) defined in [`../../pnpm-workspace.yaml`](../../pnpm-workspace.yaml).
+
+Navigate to the root directory of the monorepo, e.g.:
+
+```shell
+cd ../../
+```
+
+Install and build all of the workspace packages and examples:
+
+```shell
+pnpm install
+pnpm run -r build
+```
+
+Navigate back to this directory:
+
+```shell
+cd examples/linearlite
+```
+
+Start the example backend services using [Docker Compose](https://docs.docker.com/compose/):
+
+```shell
+pnpm backend:up
+```
+
+> Note that this always stops and deletes the volumes mounted by any other example backend containers that are running or have been run before. This ensures that the example always starts with a clean database and a clean disk.
+
+Start the write path server:
+
+```shell
+pnpm run write-server
+```
+
+Now start the dev server:
+
+```shell
+pnpm dev
+```
+
+When you're done, stop the backend services using:
+
+```shell
+pnpm backend:down
+```
+
+## How it works
+
+Linearlite demonstrates a local-first architecture using ElectricSQL and PGlite. Here's how the different pieces fit together:
+
+### Backend Components
+
+1. **Postgres Database**: The source of truth, containing the complete dataset.
+
+2. **Electric Sync Service**: Runs in front of Postgres, managing data synchronization from it to the clients. It produces replication streams for subsets of the database called "shapes".
+
+3. **Write Server**: A simple HTTP server that handles write operations, applying them to the Postgres database.
+
+### Frontend Components
+
+1. **PGlite**: An in-browser database that stores a local copy of the data, enabling offline functionality and fast queries.
+
+2. **PGlite + Electric Sync Plugin**: Connects PGlite to the Electric sync service and loads the data into the local database.
+
+3. **React Frontend**: A Linear-inspired UI that interacts directly with the local database.
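+### Example: syncing a shape into PGlite
+
+The sketch below shows the core of the read path, condensed from `src/sync.ts`. It is illustrative rather than definitive: exact option names can vary between `@electric-sql/pglite-sync` versions, and the `idb://linearlite` data directory is an assumed example value.
+
+```ts
+import { PGlite } from '@electric-sql/pglite'
+import { live } from '@electric-sql/pglite/live'
+import { electricSync } from '@electric-sql/pglite-sync'
+
+// Create the local database, persisted to IndexedDB, with the
+// live query and Electric sync extensions enabled.
+const pg = await PGlite.create({
+  dataDir: 'idb://linearlite', // assumed persistence location for this sketch
+  extensions: { live, electric: electricSync() },
+})
+
+// Stream the `issue` shape from the Electric service into the
+// local `issue` table; rows are applied as they arrive and any
+// live queries over the table re-run automatically.
+await pg.electric.syncShapeToTable({
+  shape: { url: 'http://localhost:3000/v1/shape', params: { table: 'issue' } },
+  table: 'issue',
+  primaryKey: ['id'],
+})
+```
+
+Writes flow the other way: they are applied to the local PGlite database first, where the triggers defined in `db/migrations-client` record which columns changed locally, and are then sent to the write server, which applies them to Postgres so that they sync back down to every client through Electric.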
diff --git a/examples/linearlite/backend/docker-compose.yml b/examples/linearlite/backend/docker-compose.yml
new file mode 100644
index 0000000000..15fd2c886b
--- /dev/null
+++ b/examples/linearlite/backend/docker-compose.yml
@@ -0,0 +1,30 @@
+version: "3.3"
+name: "pglite-linearlite"
+
+services:
+  postgres:
+    image: postgres:16-alpine
+    environment:
+      POSTGRES_DB: linearlite
+      POSTGRES_USER: postgres
+      POSTGRES_PASSWORD: password
+    ports:
+      - 54321:5432
+    volumes:
+      - ./postgres.conf:/etc/postgresql/postgresql.conf:ro
+    tmpfs:
+      - /var/lib/postgresql/data
+      - /tmp
+    command:
+      - postgres
+      - -c
+      - config_file=/etc/postgresql/postgresql.conf
+
+  backend:
+    image: electricsql/electric
+    environment:
+      DATABASE_URL: postgresql://postgres:password@postgres:5432/linearlite?sslmode=disable
+    ports:
+      - 3000:3000
+    depends_on:
+      - postgres
diff --git a/examples/linearlite/backend/postgres.conf b/examples/linearlite/backend/postgres.conf
new file mode 100644
index 0000000000..f28083ca8e
--- /dev/null
+++ b/examples/linearlite/backend/postgres.conf
@@ -0,0 +1,2 @@
+listen_addresses = '*'
+wal_level = logical
\ No newline at end of file
diff --git a/examples/linearlite/db/generate_data.js b/examples/linearlite/db/generate_data.js
new file mode 100644
index 0000000000..a2e5240c92
--- /dev/null
+++ b/examples/linearlite/db/generate_data.js
@@ -0,0 +1,53 @@
+import { faker } from '@faker-js/faker'
+import { generateNKeysBetween } from 'fractional-indexing'
+import { v4 as uuidv4 } from 'uuid'
+
+export function generateIssues(numIssues) {
+  // generate properly spaced kanban keys and shuffle them
+  const kanbanKeys = faker.helpers.shuffle(
+    generateNKeysBetween(null, null, numIssues)
+  )
+  return Array.from({ length: numIssues }, (_, idx) =>
+    generateIssue(kanbanKeys[idx])
+  )
+}
+
+function generateIssue(kanbanKey) {
+  const issueId = uuidv4()
+  const createdAt = faker.date.past()
+  return {
+    id: issueId,
+    title: faker.lorem.sentence({ min: 3, max: 8 }),
+    description: faker.lorem.sentences({ min: 2, max: 6 }, `\n`),
+    priority: faker.helpers.arrayElement([`none`, `low`, `medium`, `high`]),
+    status: faker.helpers.arrayElement([
+      `backlog`,
+      `todo`,
+      `in_progress`,
+      `done`,
+      `canceled`,
+    ]),
+    created: createdAt.toISOString(),
+    modified: faker.date
+      .between({ from: createdAt, to: new Date() })
+      .toISOString(),
+    kanbanorder: kanbanKey,
+    username: faker.internet.userName(),
+    comments: faker.helpers.multiple(
+      () => generateComment(issueId, createdAt),
+      { count: faker.number.int({ min: 0, max: 1 }) }
+    ),
+  }
+}
+
+function generateComment(issueId, issueCreatedAt) {
+  const createdAt = faker.date.between({ from: issueCreatedAt, to: new Date() })
+  return {
+    id: uuidv4(),
+    body: faker.lorem.text(),
+    username: faker.internet.userName(),
+    issue_id: issueId,
+    created: createdAt.toISOString(),
+    modified: createdAt.toISOString(), // comments are never modified
+  }
+}
diff --git a/examples/linearlite/db/load_data.js b/examples/linearlite/db/load_data.js
new file mode 100644
index 0000000000..4a4f494e62
--- /dev/null
+++ b/examples/linearlite/db/load_data.js
@@ -0,0 +1,72 @@
+import postgres from 'postgres'
+import { generateIssues } from './generate_data.js'
+
+if (!process.env.DATABASE_URL) {
+  throw new Error(`DATABASE_URL is not set`)
+}
+
+const DATABASE_URL = process.env.DATABASE_URL
+const ISSUES_TO_LOAD = process.env.ISSUES_TO_LOAD || 512
+const BATCH_SIZE = 1000
+const issues = generateIssues(ISSUES_TO_LOAD)
+
+console.info(`Connecting to Postgres at ${DATABASE_URL}`)
+const sql = postgres(DATABASE_URL)
+
+async function batchInsert(sql, table, columns, dataArray, batchSize = 1000) {
+  for (let i = 0; i < dataArray.length; i += batchSize) {
+    const batch = dataArray.slice(i, i + batchSize)
+
+    await sql`
+      INSERT INTO ${sql(table)} ${sql(batch, columns)}
+    `
+
+    process.stdout.write(
+      `Loaded ${Math.min(i + batchSize, dataArray.length)} of ${dataArray.length} ${table}s\r`
+    )
+  }
+}
+
+const issueCount = issues.length
+let commentCount = 0
+
+try {
+  // Process data in batches
+  for (let i = 0; i < issues.length; i += BATCH_SIZE) {
+    const issueBatch = issues.slice(i, i + BATCH_SIZE)
+
+    await sql.begin(async (sql) => {
+      // Disable FK checks
+      await sql`SET CONSTRAINTS ALL DEFERRED`
+
+      // Insert issues
+      const issuesData = issueBatch.map(({ comments: _, ...rest }) => rest)
+      const issueColumns = Object.keys(issuesData[0])
+      await batchInsert(sql, 'issue', issueColumns, issuesData, BATCH_SIZE)
+
+      // Insert related comments (skip if this batch happened to generate none)
+      const batchComments = issueBatch.flatMap((issue) => issue.comments)
+      if (batchComments.length > 0) {
+        const commentColumns = Object.keys(batchComments[0])
+        await batchInsert(
+          sql,
+          'comment',
+          commentColumns,
+          batchComments,
+          BATCH_SIZE
+        )
+      }
+
+      commentCount += batchComments.length
+    })
+
+    process.stdout.write(
+      `\nProcessed batch ${Math.floor(i / BATCH_SIZE) + 1}: ${Math.min(i + BATCH_SIZE, issues.length)} of ${issues.length} issues\n`
+    )
+  }
+
+  console.info(`Loaded ${issueCount} issues with ${commentCount} comments.`)
+} catch (error) {
+  console.error('Error loading data:', error)
+  throw error
+} finally {
+  await sql.end()
+}
diff --git a/examples/linearlite/db/migrations-client/01-create_tables.sql b/examples/linearlite/db/migrations-client/01-create_tables.sql
new file mode 100644
index 0000000000..c0a5c1e4b9
--- /dev/null
+++ b/examples/linearlite/db/migrations-client/01-create_tables.sql
@@ -0,0 +1,290 @@
+-- # Tables and indexes
+CREATE TABLE IF NOT EXISTS "issue" (
+  "id" UUID NOT NULL,
+  "title" TEXT NOT NULL,
+  "description" TEXT NOT NULL,
+  "priority" TEXT NOT NULL,
+  "status" TEXT NOT NULL,
+  "modified" TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+  "created" TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+  "kanbanorder" TEXT NOT NULL,
+  "username" TEXT NOT NULL,
+  "deleted" BOOLEAN NOT NULL DEFAULT FALSE, -- Soft delete for local deletions
+  "new" BOOLEAN NOT NULL DEFAULT FALSE, -- New row flag for local inserts
+  "modified_columns" TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[], -- Columns that have been modified locally
+  "sent_to_server" BOOLEAN NOT NULL DEFAULT FALSE, -- Flag to track if the row has been sent to the server
+  "synced" BOOLEAN GENERATED ALWAYS AS (ARRAY_LENGTH(modified_columns, 1) IS NULL AND NOT deleted AND NOT new) STORED,
+  "backup" JSONB, -- JSONB column to store the backup of the row data for modified columns
+  CONSTRAINT "issue_pkey" PRIMARY KEY ("id")
+);
+
+CREATE TABLE IF NOT EXISTS "comment" (
+  "id" UUID NOT NULL,
+  "body" TEXT NOT NULL,
+  "username" TEXT NOT NULL,
+  "issue_id" UUID NOT NULL,
+  "modified" TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+  "created" TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+  "deleted" BOOLEAN NOT NULL DEFAULT FALSE, -- Soft delete for local deletions
+  "new" BOOLEAN NOT NULL DEFAULT FALSE, -- New row flag for local inserts
+  "modified_columns" TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[], -- Columns that have been modified locally
+  "sent_to_server" BOOLEAN NOT NULL DEFAULT FALSE, -- Flag to track if the row has been sent to the server
+  "synced" BOOLEAN GENERATED ALWAYS AS (ARRAY_LENGTH(modified_columns, 1) IS NULL AND NOT deleted AND NOT new) STORED,
+  "backup" JSONB, -- JSONB column to store the backup of the row data for modified columns
+  CONSTRAINT "comment_pkey" PRIMARY KEY ("id")
+);
+
+CREATE INDEX IF NOT EXISTS "issue_id_idx" ON "issue" ("id");
+
+CREATE INDEX IF NOT EXISTS "comment_id_idx" ON "comment" ("id");
+
+-- During sync the electric.syncing config var is set to true
+-- We can use this in triggers to determine the action that should be performed
+
+-- # Delete triggers:
+-- - During sync we delete rows
+-- - Otherwise we set the deleted flag to true
+CREATE OR REPLACE FUNCTION handle_delete()
+RETURNS TRIGGER AS $$
+DECLARE
+  is_syncing BOOLEAN;
+  bypass_triggers BOOLEAN;
+BEGIN
+  -- Check if electric.syncing is true - defaults to false if not set
+  SELECT COALESCE(NULLIF(current_setting('electric.syncing', true), ''), 'false')::boolean INTO is_syncing;
+  -- Check if electric.bypass_triggers is true - defaults to false if not set
+  SELECT COALESCE(NULLIF(current_setting('electric.bypass_triggers', true), ''), 'false')::boolean INTO bypass_triggers;
+
+  IF bypass_triggers THEN
+    RETURN OLD;
+  END IF;
+
+  IF is_syncing THEN
+    -- If syncing we delete the row
+    RETURN OLD;
+  ELSE
+    -- For local deletions, check if the row is new
+    IF OLD.new THEN
+      -- If the row is new, just delete it
+      RETURN OLD;
+    ELSE
+      -- Otherwise, set the deleted flag instead of actually deleting
+      EXECUTE format('UPDATE %I SET deleted = true WHERE id = $1', TG_TABLE_NAME) USING OLD.id;
+      RETURN NULL;
+    END IF;
+  END IF;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE TRIGGER issue_delete_trigger
+BEFORE DELETE ON issue
+FOR EACH ROW
+EXECUTE FUNCTION handle_delete();
+
+CREATE OR REPLACE TRIGGER comment_delete_trigger
+BEFORE DELETE ON comment
+FOR EACH ROW
+EXECUTE FUNCTION handle_delete();
+
+-- # Insert triggers:
+-- - During sync we insert rows and set modified_columns = []
+-- - Otherwise we insert rows and set modified_columns to contain the names of all
+--   columns that are not local-state related
+
+CREATE OR REPLACE FUNCTION handle_insert()
+RETURNS TRIGGER AS $$
+DECLARE
+  is_syncing BOOLEAN;
+  bypass_triggers BOOLEAN;
+  modified_columns TEXT[] := ARRAY[]::TEXT[];
+  col_name TEXT;
+  new_value TEXT;
+  old_value TEXT;
+BEGIN
+  -- Check if electric.syncing is true - defaults to false if not set
+  SELECT COALESCE(NULLIF(current_setting('electric.syncing', true), ''), 'false')::boolean INTO is_syncing;
+  -- Check if electric.bypass_triggers is true - defaults to false if not set
+  SELECT COALESCE(NULLIF(current_setting('electric.bypass_triggers', true), ''), 'false')::boolean INTO bypass_triggers;
+
+  IF bypass_triggers THEN
+    RETURN NEW;
+  END IF;
+
+  IF is_syncing THEN
+    -- If syncing, we set modified_columns to an empty array
+    NEW.modified_columns := ARRAY[]::TEXT[];
+    NEW.new := FALSE;
+    NEW.sent_to_server := FALSE;
+    -- If the row already exists in the database, handle it as an update
+    EXECUTE format('SELECT 1 FROM %I WHERE id = $1', TG_TABLE_NAME) USING NEW.id INTO old_value;
+    IF old_value IS NOT NULL THEN
+      -- Apply update logic similar to handle_update function
+      FOR col_name IN SELECT column_name
+                      FROM information_schema.columns
+                      WHERE table_name = TG_TABLE_NAME AND
+                            table_schema = TG_TABLE_SCHEMA AND
+                            column_name NOT IN ('id', 'synced', 'modified_columns', 'backup', 'deleted', 'new', 'sent_to_server', 'search_vector') LOOP
+        EXECUTE format('SELECT $1.%I', col_name) USING NEW INTO new_value;
+        EXECUTE format('SELECT %I FROM %I WHERE id = $1', col_name, TG_TABLE_NAME) USING NEW.id INTO old_value;
+        IF new_value IS DISTINCT FROM old_value THEN
+          EXECUTE format('UPDATE %I SET %I = $1 WHERE id = $2', TG_TABLE_NAME, col_name) USING new_value, NEW.id;
+        END IF;
+      END LOOP;
+      -- Update modified_columns
+      EXECUTE format('UPDATE %I SET modified_columns = $1 WHERE id = $2', TG_TABLE_NAME)
+        USING ARRAY[]::TEXT[], NEW.id;
+      -- Update new flag
+      EXECUTE format('UPDATE %I SET new = $1 WHERE id = $2', TG_TABLE_NAME)
+        USING FALSE, NEW.id;
+      -- Update sent_to_server flag
+      EXECUTE format('UPDATE %I SET sent_to_server = $1 WHERE id = $2', TG_TABLE_NAME)
+        USING FALSE, NEW.id;
+      RETURN NULL; -- Prevent insertion of a new row
+    END IF;
+  ELSE
+    -- For local inserts, we add all non-local-state columns to modified_columns
+    SELECT array_agg(column_name) INTO modified_columns
+    FROM information_schema.columns
+    WHERE table_name = TG_TABLE_NAME
+      AND column_name NOT IN ('id', 'synced', 'modified_columns', 'backup', 'deleted', 'new', 'sent_to_server', 'search_vector');
+    NEW.modified_columns := modified_columns;
+    NEW.new := TRUE;
+  END IF;
+
+  RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE TRIGGER issue_insert_trigger
+BEFORE INSERT ON issue
+FOR EACH ROW
+EXECUTE FUNCTION handle_insert();
+
+CREATE OR REPLACE TRIGGER comment_insert_trigger
+BEFORE INSERT ON comment
+FOR EACH ROW
+EXECUTE FUNCTION handle_insert();
+
+-- # Update triggers:
+-- - During sync:
+--   - If the new modified timestamp is >= the one in the database, we apply the update,
+--     set modified_columns = [], and set backup = NULL
+--   - Otherwise we apply the update to columns that are NOT in modified_columns and
+--     save the values of the non-updated columns in the backup JSONB column
+-- - During a non-sync transaction:
+--   - If we write over a column (that is not local-state related) that was not
+--     already modified, we add that column name to modified_columns, and copy the
+--     current value from the column to the backup JSONB column
+--   - Otherwise we just update the column
+
+CREATE OR REPLACE FUNCTION handle_update()
+RETURNS TRIGGER AS $$
+DECLARE
+  is_syncing BOOLEAN;
+  bypass_triggers BOOLEAN;
+  column_name TEXT;
+  old_value TEXT;
+  new_value TEXT;
+BEGIN
+  -- Check if electric.syncing is true - defaults to false if not set
+  SELECT COALESCE(NULLIF(current_setting('electric.syncing', true), ''), 'false')::boolean INTO is_syncing;
+  -- Check if electric.bypass_triggers is true - defaults to false if not set
+  SELECT COALESCE(NULLIF(current_setting('electric.bypass_triggers', true), ''), 'false')::boolean INTO bypass_triggers;
+
+  IF bypass_triggers THEN
+    RETURN NEW;
+  END IF;
+
+  IF is_syncing THEN
+    -- During sync
+    IF (OLD.synced = TRUE) OR (OLD.sent_to_server = TRUE AND NEW.modified >= OLD.modified) THEN
+      -- Apply the update, reset modified_columns, backup, new, and sent_to_server flags
+      NEW.modified_columns := ARRAY[]::TEXT[];
+      NEW.backup := NULL;
+      NEW.new := FALSE;
+      NEW.sent_to_server := FALSE;
+    ELSE
+      -- Apply update only to columns not in modified_columns
+      FOR column_name IN SELECT columns.column_name
+                         FROM information_schema.columns
+                         WHERE columns.table_name = TG_TABLE_NAME
+                           AND columns.table_schema = TG_TABLE_SCHEMA
+                           AND columns.column_name NOT IN ('id', 'synced', 'modified_columns', 'backup', 'deleted', 'new', 'sent_to_server', 'search_vector') LOOP
+        IF NOT (column_name = ANY(OLD.modified_columns)) THEN
+          EXECUTE format('SELECT ($1).%I', column_name) USING NEW INTO new_value;
+          EXECUTE format('SELECT ($1).%I', column_name) USING OLD INTO old_value;
+          IF new_value IS DISTINCT FROM old_value THEN
+            EXECUTE format('UPDATE %I SET %I = $1 WHERE id = $2', TG_TABLE_NAME, column_name) USING new_value, NEW.id;
+            NEW.backup := jsonb_set(COALESCE(NEW.backup, '{}'::jsonb), ARRAY[column_name], to_jsonb(old_value));
+          END IF;
+        END IF;
+      END LOOP;
+      NEW.new := FALSE;
+    END IF;
+  ELSE
+    -- During non-sync transaction
+    FOR column_name IN SELECT columns.column_name
+                       FROM information_schema.columns
+                       WHERE columns.table_name = TG_TABLE_NAME
+                         AND columns.table_schema = TG_TABLE_SCHEMA
+                         AND columns.column_name NOT IN ('id', 'synced', 'modified_columns', 'backup', 'deleted', 'new', 'sent_to_server', 'search_vector') LOOP
+      EXECUTE format('SELECT ($1).%I', column_name) USING NEW INTO new_value;
+      EXECUTE format('SELECT ($1).%I', column_name) USING OLD INTO old_value;
+      IF new_value IS DISTINCT FROM old_value THEN
+        IF NOT (column_name = ANY(OLD.modified_columns)) THEN
+          NEW.modified_columns := array_append(NEW.modified_columns, column_name);
+          NEW.backup := jsonb_set(COALESCE(NEW.backup, '{}'::jsonb), ARRAY[column_name], to_jsonb(old_value));
+        END IF;
+      END IF;
+    END LOOP;
+    NEW.sent_to_server := FALSE;
+  END IF;
+
+  RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE TRIGGER issue_update_trigger
+BEFORE UPDATE ON issue
+FOR EACH ROW
+EXECUTE FUNCTION handle_update();
+
+CREATE OR REPLACE TRIGGER comment_update_trigger
+BEFORE UPDATE ON comment
+FOR EACH ROW
+EXECUTE FUNCTION handle_update();
+
+-- # Functions to revert local changes using the backup column
+
+CREATE OR REPLACE FUNCTION revert_local_changes(table_name TEXT, row_id UUID)
+RETURNS VOID AS $$
+DECLARE
+  backup_data JSONB;
+  column_name TEXT;
+  column_value JSONB;
+BEGIN
+  EXECUTE format('SELECT backup FROM %I WHERE id = $1', table_name)
+    INTO backup_data
+    USING row_id;
+
+  IF backup_data IS NOT NULL THEN
+    FOR column_name, column_value IN SELECT * FROM jsonb_each(backup_data)
+    LOOP
+      EXECUTE format('UPDATE %I SET %I = $1, modified_columns = array_remove(modified_columns, $2) WHERE id = $3', table_name, column_name)
+        USING column_value, column_name, row_id;
+    END LOOP;
+
+    -- Clear the backup after reverting
+    EXECUTE format('UPDATE %I SET backup = NULL WHERE id = $1', table_name)
+      USING row_id;
+  END IF;
+END;
+$$ LANGUAGE plpgsql;
+
+-- Example usage:
+-- SELECT revert_local_changes('issue', '123e4567-e89b-12d3-a456-426614174000');
+-- SELECT revert_local_changes('comment', '123e4567-e89b-12d3-a456-426614174001');
+
+ALTER TABLE issue DISABLE TRIGGER ALL;
+ALTER TABLE comment DISABLE TRIGGER ALL;
diff --git a/examples/linearlite/db/migrations-client/post-initial-sync-fts-index.sql b/examples/linearlite/db/migrations-client/post-initial-sync-fts-index.sql
new file mode 100644
index 0000000000..9e292c053d
--- /dev/null
+++ b/examples/linearlite/db/migrations-client/post-initial-sync-fts-index.sql
@@ -0,0 +1 @@
+CREATE INDEX IF NOT EXISTS "issue_search_idx" ON "issue" USING GIN ((setweight(to_tsvector('simple', coalesce(title, '')), 'A') || setweight(to_tsvector('simple', coalesce(description, '')), 'B')));
\ No newline at end of file
diff --git a/examples/linearlite/db/migrations-client/post-initial-sync-indexes.sql b/examples/linearlite/db/migrations-client/post-initial-sync-indexes.sql
new file mode 100644
index 0000000000..30328e4677
--- /dev/null
+++ b/examples/linearlite/db/migrations-client/post-initial-sync-indexes.sql
@@ -0,0 +1,11 @@
+CREATE INDEX IF NOT EXISTS "issue_priority_idx" ON "issue" ("priority");
+CREATE INDEX IF NOT EXISTS "issue_status_idx" ON "issue" ("status");
+CREATE INDEX IF NOT EXISTS "issue_modified_idx" ON "issue" ("modified");
"issue_modified_idx" ON "issue" ("modified"); +CREATE INDEX IF NOT EXISTS "issue_created_idx" ON "issue" ("created"); +CREATE INDEX IF NOT EXISTS "issue_kanbanorder_idx" ON "issue" ("kanbanorder"); +CREATE INDEX IF NOT EXISTS "issue_deleted_idx" ON "issue" ("deleted"); +CREATE INDEX IF NOT EXISTS "issue_synced_idx" ON "issue" ("synced"); +CREATE INDEX IF NOT EXISTS "comment_issue_id_idxx" ON "comment" ("issue_id"); +CREATE INDEX IF NOT EXISTS "comment_created_idx" ON "comment" ("created"); +CREATE INDEX IF NOT EXISTS "comment_deleted_idx" ON "comment" ("deleted"); +CREATE INDEX IF NOT EXISTS "comment_synced_idx" ON "comment" ("synced"); \ No newline at end of file diff --git a/examples/linearlite/db/migrations-server/01-create_tables.sql b/examples/linearlite/db/migrations-server/01-create_tables.sql new file mode 100644 index 0000000000..84c2cac11e --- /dev/null +++ b/examples/linearlite/db/migrations-server/01-create_tables.sql @@ -0,0 +1,24 @@ +-- Create the tables for the linearlite example +CREATE TABLE IF NOT EXISTS "issue" ( + "id" UUID NOT NULL, + "title" TEXT NOT NULL, + "description" TEXT NOT NULL, + "priority" TEXT NOT NULL, + "status" TEXT NOT NULL, + "modified" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "created" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "kanbanorder" TEXT NOT NULL, + "username" TEXT NOT NULL, + CONSTRAINT "issue_pkey" PRIMARY KEY ("id") +); + +CREATE TABLE IF NOT EXISTS "comment" ( + "id" UUID NOT NULL, + "body" TEXT NOT NULL, + "username" TEXT NOT NULL, + "issue_id" UUID NOT NULL, + "modified" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "created" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "comment_pkey" PRIMARY KEY ("id"), + FOREIGN KEY (issue_id) REFERENCES issue(id) ON DELETE CASCADE +); diff --git a/examples/linearlite/eslint.config.js b/examples/linearlite/eslint.config.js new file mode 100644 index 0000000000..99c462472e --- /dev/null +++ b/examples/linearlite/eslint.config.js @@ -0,0 +1,87 @@ +// @ts-expect-error no types +import js from '@eslint/js' +// @ts-expect-error no types +import { FlatCompat } from '@eslint/eslintrc' +import globals from 'globals' + +import eslintTsParser from '@typescript-eslint/parser' +import tsPlugin from '@typescript-eslint/eslint-plugin' + +import pluginReact from "@eslint-react/eslint-plugin"; +// @ts-expect-error no types +import pluginReactCompiler from "eslint-plugin-react-compiler"; +// @ts-expect-error no types +import pluginReactHooks from "eslint-plugin-react-hooks"; + +const compat = new FlatCompat() + +export default [ + js.configs.recommended, + ...compat.extends('plugin:@typescript-eslint/recommended'), + { + languageOptions: { + parser: eslintTsParser, + globals: { + ...globals.browser, + }, + }, + files: ['**/*.{ts,tsx}'], + plugins: { + '@typescript-eslint': tsPlugin, + }, + rules: { + ...tsPlugin.configs.recommended?.rules, + '@typescript-eslint/no-unused-vars': [ + 'warn', // or "error" + { + argsIgnorePattern: '^_', + varsIgnorePattern: '^_', + caughtErrorsIgnorePattern: '^_', + }, + ], + '@typescript-eslint/no-inferrable-types': 'off', // always allow explicit typings + '@typescript-eslint/no-empty-function': 'off', + '@typescript-eslint/no-unused-expressions': 'off', + '@typescript-eslint/no-explicit-any': ['warn', { ignoreRestArgs: true }], + '@typescript-eslint/ban-ts-comment': [ + 'error', + { 'ts-ignore': 'allow-with-description' }, + ], + 'no-constant-condition': ['error', { checkLoops: false }], + eqeqeq: ['error'], + }, + }, + { + files: ['**/*.cjs'], + languageOptions: { + globals: { + ...globals.node, 
+ }, + }, + rules: { + '@typescript-eslint/no-var-requires': 'off', + }, + }, + { + files: ["**/*.{ts,tsx}"], + ...pluginReact.configs.recommended, + }, + { + plugins: { + "react-hooks": pluginReactHooks, + "react-compiler": pluginReactCompiler, + }, + rules: { + "react-compiler/react-compiler": "error", + "react-hooks/exhaustive-deps": "error", + "react-hooks/rules-of-hooks": "error", + }, + }, + { + files: ["**/test/**"], + rules: { + "@typescript-eslint/no-unnecessary-condition": "off", + "react-compiler/react-compiler": "off", + }, + }, +] diff --git a/examples/linearlite/index.html b/examples/linearlite/index.html new file mode 100644 index 0000000000..124f0f97f5 --- /dev/null +++ b/examples/linearlite/index.html @@ -0,0 +1,21 @@ + + +
+        <p>
+          This is an example of a team collaboration app such as{` `}
+          <a href="https://linear.app">
+            Linear
+          </a>
+          {` `}
+          built using{` `}
+          <a href="https://electric-sql.com">
+            ElectricSQL
+          </a>
+          {` `}- the local-first sync layer for web and mobile apps.
+        </p>
+        <p>
+          This example is built on top of the excellent clone of the Linear UI
+          built by{` `}
+          <a href="https://github.com/tuan3w">
+            Tuan Nguyen
+          </a>
+          .
+        </p>
+        <p>
+          We have replaced the canned data with a stack running{` `}
+          <a href="https://electric-sql.com">
+            Electric
+          </a>
+          {` `}
+          in Docker.
+        </p>
+declare module '@firefox-devtools/react-contextmenu' {
+  export function connectMenu(
+    menuId: string
+  ): (
+    Child: React.ComponentType
+  ) => React.ComponentType
+  export function showMenu(opts?: any, target?: HTMLElement): void
+  export function hideMenu(opts?: any, target?: HTMLElement): void
+}
+
+declare module '@firefox-devtools/react-contextmenu/modules/actions' {
+ export function showMenu(opts?: any, target?: HTMLElement): void
+ export function hideMenu(opts?: any, target?: HTMLElement): void
+}
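+
+// Example usage (illustrative only - `issue-menu` is a hypothetical menu id,
+// and since `opts` is untyped here, the shape shown is just the conventional one):
+//
+//   import { showMenu, hideMenu } from '@firefox-devtools/react-contextmenu/modules/actions'
+//
+//   showMenu({ position: { x: event.clientX, y: event.clientY }, id: 'issue-menu' })
+//   hideMenu()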
diff --git a/examples/linearlite/src/style.css b/examples/linearlite/src/style.css
new file mode 100644
index 0000000000..9b39c3c85e
--- /dev/null
+++ b/examples/linearlite/src/style.css
@@ -0,0 +1,73 @@
+@tailwind base;
+@tailwind components;
+@tailwind utilities;
+body {
+ font-size: 12px;
+ @apply font-medium text-gray-600;
+}
+
+@font-face {
+ font-family: 'Inter UI';
+ font-style: normal;
+ font-weight: 400;
+ font-display: swap;
+ src:
+ url('assets/fonts/Inter-UI-Regular.woff2') format('woff2'),
+ url('assets/fonts/Inter-UI-Regular.woff') format('woff');
+}
+
+@font-face {
+ font-family: 'Inter UI';
+ font-style: normal;
+ font-weight: 500;
+ font-display: swap;
+ src:
+ url('assets/fonts/Inter-UI-Medium.woff2') format('woff2'),
+ url('assets/fonts/Inter-UI-Medium.woff') format('woff');
+}
+
+@font-face {
+ font-family: 'Inter UI';
+ font-style: normal;
+ font-weight: 600;
+ font-display: swap;
+ src:
+ url('assets/fonts/Inter-UI-SemiBold.woff2') format('woff2'),
+ url('assets/fonts/Inter-UI-SemiBold.woff') format('woff');
+}
+
+@font-face {
+ font-family: 'Inter UI';
+ font-style: normal;
+ font-weight: 800;
+ font-display: swap;
+ src:
+ url('assets/fonts/Inter-UI-ExtraBold.woff2') format('woff2'),
+ url('assets/fonts/Inter-UI-ExtraBold.woff') format('woff');
+}
+
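+/* Constrain modals to the viewport, leaving a 16px gutter on each side */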
+.modal {
+ max-width: calc(100vw - 32px);
+ max-height: calc(100vh - 32px);
+}
+
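+/* Restore list markers inside the rich-text editor (Tailwind's preflight resets them) */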
+.editor ul {
+ list-style-type: circle;
+}
+.editor ol {
+ list-style-type: decimal;
+}
+
+#root,
+body,
+html {
+ height: 100%;
+}
+
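+/* TipTap placeholder: render the data-placeholder attribute text in an empty editor */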
+.tiptap p.is-editor-empty:first-child::before {
+ color: #adb5bd;
+ content: attr(data-placeholder);
+ float: left;
+ height: 0;
+ pointer-events: none;
+}
diff --git a/examples/linearlite/src/sync.ts b/examples/linearlite/src/sync.ts
new file mode 100644
index 0000000000..ea5a84f528
--- /dev/null
+++ b/examples/linearlite/src/sync.ts
@@ -0,0 +1,318 @@
+import { Mutex } from '@electric-sql/pglite'
+import { type PGliteWithLive } from '@electric-sql/pglite/live'
+import { type PGliteWithSync } from '@electric-sql/pglite-sync'
+import type { IssueChange, CommentChange, ChangeSet } from './utils/changes'
+import { postInitialSync } from './migrations'
+import { useEffect, useState } from 'react'
+
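+// Endpoints, configurable via VITE_* env vars: the write server applies local
+// changes to Postgres; the Electric service streams shape data down.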
+const WRITE_SERVER_URL = import.meta.env.VITE_WRITE_SERVER_URL
+ ? import.meta.env.VITE_WRITE_SERVER_URL
+ : `http://localhost:3001`
+const ELECTRIC_URL = import.meta.env.VITE_ELECTRIC_URL
+ ? new URL(import.meta.env.VITE_ELECTRIC_URL).origin
+ : `http://localhost:3000`
+const ELECTRIC_DATABASE_ID = import.meta.env.VITE_ELECTRIC_DATABASE_ID
+const ELECTRIC_TOKEN = import.meta.env.VITE_ELECTRIC_TOKEN
+const APPLY_CHANGES_URL = `${WRITE_SERVER_URL}/apply-changes`
+
+type SyncStatus = 'initial-sync' | 'done'
+
+type PGliteWithExtensions = PGliteWithLive & PGliteWithSync
+
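+// Start the read path (Electric shapes into local PGlite) before the write
+// path (local changes out to the write server).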
+export async function startSync(pg: PGliteWithExtensions) {
+ await startSyncToDatabase(pg)
+ startWritePath(pg)
+}
+
+async function startSyncToDatabase(pg: PGliteWithExtensions) {
+ // Check if there are any issues already in the database
+ const issues = await pg.query(`SELECT 1 FROM issue LIMIT 1`)
+ const hasIssuesAtStart = issues.rows.length > 0
+
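+  // Track initial-sync progress for each shape and for the post-initial-sync migrations.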
+ let issueShapeInitialSyncDone = false
+ let commentShapeInitialSyncDone = false
+ let postInitialSyncDone = false
+
+ if (!hasIssuesAtStart && !postInitialSyncDone) {
+ updateSyncStatus('initial-sync', 'Downloading shape data...')
+ }
+
+ let postInitialSyncDoneResolver: () => void
+ const postInitialSyncDonePromise = new Promise