Skip to content

Commit

Permalink
chore: e2e test for multi tenancy (#1939)
Browse files Browse the repository at this point in the history
This PR adds an e2e test for multi tenancy.
It checks:
- adding new tenants
- per-tenant health check
- initial sync works
- live queries work and do not interfere between tenants
- tenants are persisted and reloaded when Electric restarts
- tenants can be deleted
  • Loading branch information
kevin-dp authored Nov 7, 2024
1 parent 970cbe0 commit 38afb0c
Show file tree
Hide file tree
Showing 5 changed files with 296 additions and 15 deletions.
7 changes: 7 additions & 0 deletions .github/workflows/integration_tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -66,4 +66,11 @@ jobs:
run: make

- name: Run integration tests
id: tests
run: ./run.sh
- name: Upload lux logs
uses: actions/upload-artifact@v3
if: ${{ failure() && steps.tests.outcome == 'failure' }}
with:
name: lux_logs
path: integration-tests/**/lux_logs/run_*
75 changes: 63 additions & 12 deletions integration-tests/tests/_macros.luxinc
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,8 @@
[global pg_host_port=54331]
[global database_url=postgresql://postgres:password@localhost:$pg_host_port/postgres?sslmode=disable]

[macro setup_pg initdb_args config_opts]
[shell pg]
[macro setup_pg_with_name shell_name initdb_args config_opts]
[shell $shell_name]
-$fail_pattern

!docker run \
Expand All @@ -27,6 +27,10 @@
-
[endmacro]

# Start a Postgres container using the default shell name "pg".
# Thin wrapper kept for existing single-tenant tests; multi-tenant tests
# call setup_pg_with_name directly with a per-tenant shell name.
[macro setup_pg initdb_args config_opts]
[invoke setup_pg_with_name "pg" $initdb_args $config_opts]
[endmacro]

[macro stop_pg]
[shell pg_lifecycle]
# This timeout is needed until https://github.com/electric-sql/electric/issues/1632 is fixed.
Expand All @@ -39,20 +43,20 @@
[endmacro]

[macro resume_pg]
[shell pg_lifecycle]
!docker start $pg_container_name
?$PS1

[shell pg]
!docker attach $pg_container_name
!docker start --attach $pg_container_name
??database system is ready to accept connections
[endmacro]

[macro start_psql]
[shell psql]
# Open an interactive psql session (as OS user postgres) inside the given
# Postgres container, in a Lux shell named $shell_name.
[macro start_psql_shell shell_name pg_container_name]
[shell $shell_name]
!docker exec -u postgres -it $pg_container_name psql
[endmacro]

# Open a psql session against the global $pg_container_name in the
# default "psql" shell. Wrapper kept for existing single-DB tests.
[macro start_psql]
[invoke start_psql_shell psql $pg_container_name]
[endmacro]

[macro seed_pg]
[shell psql]
!docker exec -u postgres -it $pg_container_name psql
Expand Down Expand Up @@ -80,24 +84,71 @@
[endmacro]

# Start Electric in single-tenant mode: the default tenant id and its
# DATABASE_URL are configured up front via environment variables.
[macro setup_electric]
[invoke setup_electric_with_env "ELECTRIC_DATABASE_ID=integration_test_tenant DATABASE_URL=$database_url"]
[endmacro]

# Start Electric in multi-tenant mode: no tenant is configured via the
# environment; tenants are added at runtime through the admin API
# (see the add_tenant macro).
[macro setup_multi_tenant_electric]
[invoke setup_electric_with_env ""]
[endmacro]

# Start Electric in the "electric" shell on port 3000, passing the given
# extra environment variables through to the dev script.
[macro setup_electric_with_env env]
[invoke setup_electric_shell "electric" "3000" $env]
[endmacro]

# Like setup_electric_with_env, but additionally configures the default
# single-tenant database (fixed tenant id + global $database_url) ahead of
# the caller-supplied env vars.
[macro setup_electric_with_env_and_tenant env]
[invoke setup_electric_with_env "ELECTRIC_DATABASE_ID=integration_test_tenant DATABASE_URL=$database_url $env"]
[endmacro]

[macro setup_electric_shell shell_name port env]
[shell $shell_name]
-$fail_pattern

!ELECTRIC_DATABASE_ID=integration_test_tenant DATABASE_URL=$database_url ELECTRIC_PORT=$port $env ../scripts/electric_dev.sh
!ELECTRIC_PORT=$port $env ../scripts/electric_dev.sh
[endmacro]

[macro teardown]
# Start Electric in a named shell on the given port, preconfigured with the
# default single-tenant database. Used by tests that run several Electric
# instances side by side (e.g. rolling-deploy).
[macro setup_electric_shell_with_tenant shell_name port]
[invoke setup_electric_shell $shell_name $port "ELECTRIC_DATABASE_ID=integration_test_tenant DATABASE_URL=$database_url"]
[endmacro]

# Register a new tenant with a running Electric instance via the admin API.
# Opens (or reuses) a Lux shell named after the tenant, POSTs the tenant id
# and the current global $database_url, and expects the response to echo the
# tenant id back.
[macro add_tenant tenant_id electric_port]
[shell $tenant_id]
!curl -X POST http://localhost:$electric_port/v1/admin/database \
-H "Content-Type: application/json" \
-d "{\"database_id\":\"$tenant_id\",\"database_url\":\"$database_url\"}"
??"$tenant_id"
[endmacro]

# Poll the per-tenant health endpoint (once per second, up to 10 tries, via
# the wait-for macro) until it reports the expected status for this tenant.
[macro check_tenant_status tenant_id expected_status electric_port]
[shell $tenant_id]
[invoke wait-for "curl -X GET http://localhost:$electric_port/v1/health?database_id=$tenant_id" "\{\"status\":\"$expected_status\"\}" 10 $PS1]
[endmacro]

[macro teardown_container container_name]
-$fail_pattern
!docker rm -f -v $pg_container_name
!docker rm -f -v $container_name
?$PS1
[endmacro]

# Default teardown: remove the Postgres container currently referenced by the
# global $pg_container_name, then run the shared cleanup script.
[macro teardown]
[invoke teardown_container $pg_container_name]
!../scripts/clean_up.sh
?$PS1
[endmacro]

# Repeatedly run $command (once per second, at most $max_time times) until
# its output matches $match; the loop exits early via the @$match break
# pattern. The trailing matches resynchronise the shell with its prompt
# after the early exit.
[macro wait-for command match max_time prompt]
[loop i 1..$max_time]
@$match
!$command
??$command
?$prompt
[sleep 1]
[endloop]
# The last prompt won't match since the loop pattern will
# match before it, so match it here instead.
?$prompt

# Sync up after the loop.
!$command
??$command
?$prompt
[endmacro]
223 changes: 223 additions & 0 deletions integration-tests/tests/multi-tenancy.lux
Original file line number Diff line number Diff line change
@@ -0,0 +1,223 @@
[doc Verify support for multi tenancy]

[include _macros.luxinc]

## Each tenant gets its own Postgres container, host port and connection URL.
[global tenant1_pg_container_name=multi-tenancy-tenant1__pg]
[global tenant1_pg_host_port=54331]
[global tenant1_database_url=postgresql://postgres:password@localhost:$tenant1_pg_host_port/postgres?sslmode=disable]

[global tenant2_pg_container_name=multi-tenancy-tenant2__pg]
[global tenant2_pg_host_port=54332]
[global tenant2_database_url=postgresql://postgres:password@localhost:$tenant2_pg_host_port/postgres?sslmode=disable]

###

## Start a new Postgres DB
# The setup_pg_with_name / add_tenant macros read the pg_* and database_url
# globals, so they are repointed at tenant 1's values first.
[global pg_container_name=$tenant1_pg_container_name]
[global pg_host_port=$tenant1_pg_host_port]
[global database_url=$tenant1_database_url]
[invoke setup_pg_with_name "tenant1_pg" "" ""]

## Start Electric in multi tenancy mode
[invoke setup_multi_tenant_electric]

[shell electric]
???[info] Running Electric.Plug.Router with Bandit 1.5.5 at 0.0.0.0:3000 (http)

## Create tenant 1
[invoke add_tenant "tenant1" 3000]
[invoke check_tenant_status "tenant1" "active" 3000]

## Setup a second Postgres DB
# Repoint the shared globals at tenant 2 before reusing the same macros.
[global pg_container_name=$tenant2_pg_container_name]
[global pg_host_port=$tenant2_pg_host_port]
[global database_url=$tenant2_database_url]
[invoke setup_pg_with_name "tenant2_pg" "" ""]

## Create tenant 2
[invoke add_tenant "tenant2" 3000]
[invoke check_tenant_status "tenant2" "active" 3000]

## Insert some data in both DBs
# Each tenant's row carries a tenant-distinct value, so any cross-tenant
# leakage would show up in the shape responses checked below.
[invoke start_psql_shell "tenant1_psql" $tenant1_pg_container_name]
[invoke start_psql_shell "tenant2_psql" $tenant2_pg_container_name]

[shell tenant1_psql]
!CREATE TABLE items (id INT PRIMARY KEY, val TEXT);
??CREATE TABLE
!INSERT INTO items (id, val) VALUES (1, 'tenant1');
??INSERT 0 1

[shell tenant2_psql]
!CREATE TABLE items (id INT PRIMARY KEY, val TEXT);
??CREATE TABLE
!INSERT INTO items (id, val) VALUES (1, 'tenant2');
??INSERT 0 1

## Check that both tenants can query their data
[shell tenant1]
# Check tenant 1 data
!curl -i -X GET "http://localhost:3000/v1/shape?table=items&offset=-1&database_id=tenant1"
# Capture the shape handle and offset from the (ANSI-bold) response headers
# for use in the follow-up live queries.
?\e\[1melectric-handle\e\[0m: ([\d-]+)
[local shape_id=$1]
?\e\[1melectric-offset\e\[0m: ([\d_]+)
[local offset=$1]
"""??
[{"key":"\"public\".\"items\"/\"1\"","value":{"id":"1","val":"tenant1"},"headers":{"operation":"insert","relation":["public","items"]},"offset":"$offset"}
]
"""

# Check tenant 2 data
[shell tenant2]
!curl -i -X GET "http://localhost:3000/v1/shape?table=items&offset=-1&database_id=tenant2"
?\e\[1melectric-handle\e\[0m: ([\d-]+)
[local shape_id=$1]
?\e\[1melectric-offset\e\[0m: ([\d_]+)
[local offset=$1]
"""??
[{"key":"\"public\".\"items\"/\"1\"","value":{"id":"1","val":"tenant2"},"headers":{"operation":"insert","relation":["public","items"]},"offset":"$offset"}
]
"""

## Now do a live query on tenant 1
# The curl blocks until new data arrives; the matching INSERTs below unblock it.
[shell tenant1]
??$PS1
!curl -i -X GET "localhost:3000/v1/shape?table=items&offset=$offset&handle="$shape_id"&database_id=tenant1&live"

## And a live query on tenant 2
[shell tenant2]
??$PS1
!curl -i -X GET "localhost:3000/v1/shape?table=items&offset=$offset&handle="$shape_id"&database_id=tenant2&live"

## Insert some data in tenant 1
[shell tenant1_psql]
!INSERT INTO items (id, val) VALUES (2, 'tenant1');
??INSERT 0 1

## Insert some data in tenant 2
[shell tenant2_psql]
!INSERT INTO items (id, val) VALUES (2, 'tenant2');
??INSERT 0 1

## Check that tenant 1 sees the new data
# Each tenant must see only its own value — this is the cross-tenant
# isolation check.
[shell tenant1]
# give some time for the data to sync
[sleep 1]
?\e\[1melectric-offset\e\[0m: ([\d_]+)
[local offset=$1]
??[{"offset":"$offset","value":{"id":"2","val":"tenant1"},"key":"\"public\".\"items\"/\"2\"","headers":{"relation":["public","items"],"operation":"insert","txid":
# NOTE(review): "[\d+]" is a character class matching a single digit or '+';
# "\d+" was probably intended — confirm.
?[\d+]
??}},{"headers":{"control":"up-to-date"}}]$PS1

## Check that tenant 2 sees the new data
[shell tenant2]
[sleep 1]
?\e\[1melectric-offset\e\[0m: ([\d_]+)
[local offset=$1]
??[{"offset":"$offset","value":{"id":"2","val":"tenant2"},"key":"\"public\".\"items\"/\"2\"","headers":{"relation":["public","items"],"operation":"insert","txid":
?[\d+]
??}},{"headers":{"control":"up-to-date"}}]$PS1

# Disable fail pattern for Electric as we are going to kill it
[shell electric]
-

## kill Electric
[shell orchestrator]
!kill $(lsof -ti:3000)
??$PS1

## restart Electric
# Both tenants must be reloaded from storage — this verifies tenant
# persistence across restarts.
[shell electric]
??$PS1
# Re-enable fail pattern for Electric
-$fail_pattern
[invoke setup_multi_tenant_electric]
???Reloading tenant tenant1 from storage
???Reloading tenant tenant2 from storage
???[info] Running Electric.Plug.Router with Bandit 1.5.5 at 0.0.0.0:3000 (http)

## Make a query to check that they still see their data
# Resuming from the previously captured offset/handle should yield no new
# rows, just the up-to-date control message.
[shell tenant1]
# Query the shape
!curl -i -X GET "http://localhost:3000/v1/shape?table=items&offset=${offset}&handle=${shape_id}&database_id=tenant1"
???[{"headers":{"control":"up-to-date"}}]
??$PS1

[shell tenant2]
# Query the shape
!curl -i -X GET "http://localhost:3000/v1/shape?table=items&offset=${offset}&handle=${shape_id}&database_id=tenant2"
???[{"headers":{"control":"up-to-date"}}]
??$PS1

## Make a live query on both and check that it still works
# Same live-query/insert/verify cycle as before the restart, with id 3.
[shell tenant1]
!curl -i -X GET "localhost:3000/v1/shape?table=items&offset=$offset&handle="$shape_id"&database_id=tenant1&live"

[shell tenant2]
!curl -i -X GET "localhost:3000/v1/shape?table=items&offset=$offset&handle="$shape_id"&database_id=tenant2&live"

## Insert some data in tenant 1
[shell tenant1_psql]
!INSERT INTO items (id, val) VALUES (3, 'tenant 1');
??INSERT 0 1

## Insert some data in tenant 2
[shell tenant2_psql]
!INSERT INTO items (id, val) VALUES (3, 'tenant 2');
??INSERT 0 1

## Check that tenant 1 sees the new data
[shell tenant1]
# give some time for the data to sync
[sleep 1]
?\e\[1melectric-offset\e\[0m: ([\d_]+)
[local offset=$1]
??[{"offset":"$offset","value":{"id":"3","val":"tenant 1"},"key":"\"public\".\"items\"/\"3\"","headers":{"relation":["public","items"],"operation":"insert","txid":
?[\d+]
??}},{"headers":{"control":"up-to-date"}}]$PS1

## Check that tenant 2 sees the new data
[shell tenant2]
[sleep 1]
?\e\[1melectric-offset\e\[0m: ([\d_]+)
[local offset=$1]
??[{"offset":"$offset","value":{"id":"3","val":"tenant 2"},"key":"\"public\".\"items\"/\"3\"","headers":{"relation":["public","items"],"operation":"insert","txid":
?[\d+]
??}},{"headers":{"control":"up-to-date"}}]$PS1

[shell electric]
# disable fail pattern because deleting a tenant will stop the tenant processes
# which will output some error messages because of the shutdown
-

## delete one of the tenants
[shell orchestrator]
!curl -X DELETE http://localhost:3000/v1/admin/database/tenant2
???"tenant2"
??$PS1
# Verify that tenant 2 is deleted
!curl -X GET http://localhost:3000/v1/health?database_id="tenant2"
???"Database not found"
??$PS1
# Verify that tenant 1 still exists
[invoke check_tenant_status "tenant1" "active" 3000]

## kill Electric
[shell orchestrator]
!kill $(lsof -ti:3000)
??$PS1

## restart Electric and check that only tenant 1 is reloaded and not tenant 2
[shell electric]
??$PS1
# Set fail pattern to fail if tenant 2 is reloaded
-Reloading tenant tenant2 from storage
# NOTE(review): this uses PORT=3000 while the macros pass ELECTRIC_PORT —
# confirm the dev script accepts both, or this restart may not bind port 3000.
!PORT=3000 ../scripts/electric_dev.sh
???Reloading tenant tenant1 from storage
???[info] Running Electric.Plug.Router with Bandit 1.5.5 at 0.0.0.0:3000 (http)

[cleanup]
# At this point the pg_* globals still point at tenant 2, so the shared
# teardown removes tenant 2's container; tenant 1's is removed explicitly.
[invoke teardown]
# Also tear down the first tenant
[invoke teardown_container $tenant1_pg_container_name]
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
??INSERT 0 2

## Start the sync service with the CrashingFileStorage storage backend.
[invoke setup_electric_with_env "ELECTRIC_STORAGE=crashing_file CRASHING_FILE_ELECTRIC_STORAGE__NUM_CALLS_UNTIL_CRASH=2"]
[invoke setup_electric_with_env_and_tenant "ELECTRIC_STORAGE=crashing_file CRASHING_FILE_ELECTRIC_STORAGE__NUM_CALLS_UNTIL_CRASH=2"]

[shell electric]
??[info] Starting replication from postgres
Expand Down
4 changes: 2 additions & 2 deletions integration-tests/tests/rolling-deploy.lux
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
[invoke setup_pg "" ""]

## Start the first sync service.
[invoke setup_electric_shell "electric_1" "3000" ""]
[invoke setup_electric_shell_with_tenant "electric_1" "3000"]

[shell electric_1]
??[info] Acquiring lock from postgres with name electric_slot_integration
Expand All @@ -23,7 +23,7 @@
??{"status":"active"}

## Start the second sync service.
[invoke setup_electric_shell "electric_2" "3001" ""]
[invoke setup_electric_shell_with_tenant "electric_2" "3001"]

## Assert that the lock is not acquired and replication does not start
## in the second electric
Expand Down

0 comments on commit 38afb0c

Please sign in to comment.