diff --git a/.github/workflows/bats.yml b/.github/workflows/bats.yml
new file mode 100644
index 00000000..7378d41f
--- /dev/null
+++ b/.github/workflows/bats.yml
@@ -0,0 +1,19 @@
+name: "E2E Tests"
+
+on:
+  pull_request:
+    branches: [main]
+
+jobs:
+  integration:
+    name: End to End Test
+    runs-on: ubuntu-latest
+    steps:
+      - name: Install Nix
+        uses: DeterminateSystems/nix-installer-action@v4
+      - name: Run the Magic Nix Cache
+        uses: DeterminateSystems/magic-nix-cache-action@v2
+      - uses: actions/checkout@v3
+      - name: Run e2e tests
+        run: nix develop -c make e2e
+
diff --git a/.gitignore b/.gitignore
index d7da8ffd..b8ece28e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,3 +14,7 @@
 Cargo.lock
 *.pdb
 .direnv/
+
+-
+.e2e-logs
+.cala
diff --git a/Makefile b/Makefile
index 0bdf14ec..021c3393 100644
--- a/Makefile
+++ b/Makefile
@@ -30,5 +30,11 @@ check-code: sdl
 	SQLX_OFFLINE=true cargo clippy --all-features
 	SQLX_OFFLINE=true cargo audit
 
+build:
+	SQLX_OFFLINE=true cargo build --locked
+
+e2e: clean-deps start-deps build
+	bats -t bats
+
 sdl:
 	SQLX_OFFLINE=true cargo run --bin write_sdl > cala-server/schema.graphql
diff --git a/bats/examples.bats b/bats/examples.bats
new file mode 100644
index 00000000..e5df3dda
--- /dev/null
+++ b/bats/examples.bats
@@ -0,0 +1,33 @@
+#!/usr/bin/env bats
+
+load "helpers"
+
+setup_file() {
+  reset_pg
+  start_server
+}
+
+teardown_file() {
+  stop_server
+}
+
+
+@test "rust: entities sync to server" {
+  variables=$(
+    jq -n \
+    '{
+      input: {
+        name: "rust-example",
+        endpoint: "http://localhost:2253"
+      }
+    }'
+  )
+
+  exec_graphql 'import-job-create' "$variables"
+
+  name=$(graphql_output '.data.importJobCreate.importJob.name')
+  [[ "$name" == "rust-example" ]] || exit 1;
+
+
+  cargo run --bin cala-ledger-example-rust
+}
diff --git a/bats/gql/import-job-create.gql b/bats/gql/import-job-create.gql
new file mode 100644
index 00000000..17930849
--- /dev/null
+++ b/bats/gql/import-job-create.gql
@@ -0,0 +1,9 @@
+mutation importJobCreate($input: ImportJobCreateInput!) {
+  importJobCreate(input: $input) {
+    importJob {
+      importJobId
+      name
+    }
+  }
+}
+
diff --git a/bats/helpers.bash b/bats/helpers.bash
new file mode 100644
index 00000000..c231d6cd
--- /dev/null
+++ b/bats/helpers.bash
@@ -0,0 +1,107 @@
+REPO_ROOT=$(git rev-parse --show-toplevel)
+GQL_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")/gql"
+COMPOSE_PROJECT_NAME="${COMPOSE_PROJECT_NAME:-${REPO_ROOT##*/}}"
+
+GQL_ENDPOINT="http://localhost:2252/graphql"
+
+CALA_HOME="${CALA_HOME:-.cala}"
+
+reset_pg() {
+  docker exec "${COMPOSE_PROJECT_NAME}-server-pg-1" psql $PG_CON -c "DROP SCHEMA public CASCADE"
+  docker exec "${COMPOSE_PROJECT_NAME}-server-pg-1" psql $PG_CON -c "CREATE SCHEMA public"
+  docker exec "${COMPOSE_PROJECT_NAME}-examples-pg-1" psql $PG_CON -c "DROP SCHEMA public CASCADE"
+  docker exec "${COMPOSE_PROJECT_NAME}-examples-pg-1" psql $PG_CON -c "CREATE SCHEMA public"
+}
+
+server_cmd() {
+  server_location=${REPO_ROOT}/target/debug/cala-server
+  if [[ ! -z ${CARGO_TARGET_DIR} ]] ; then
+    server_location=${CARGO_TARGET_DIR}/debug/cala-server
+  fi
+
+  ${server_location} $@
+}
+
+start_server() {
+  background server_cmd > .e2e-logs
+  for i in {1..20}
+  do
+    if head .e2e-logs | grep -q 'Starting graphql server on port'; then
+      break
+    else
+      sleep 1
+    fi
+  done
+}
+
+stop_server() {
+  if [[ -f ${CALA_HOME}/server-pid ]]; then
+    kill -9 $(cat ${CALA_HOME}/server-pid) || true
+  fi
+}
+
+gql_file() {
+  echo "${GQL_DIR}/$1.gql"
+}
+
+gql_query() {
+  cat "$(gql_file $1)" | tr '\n' ' ' | sed 's/"/\\"/g'
+}
+
+graphql_output() {
+  echo $output | jq -r "$@"
+}
+
+exec_graphql() {
+  local query_name=$1
+  local variables=${2:-"{}"}
+
+  if [[ "${BATS_TEST_DIRNAME}" != "" ]]; then
+    run_cmd="run"
+  else
+    run_cmd=""
+  fi
+
+  ${run_cmd} curl -s \
+    -X POST \
+    ${AUTH_HEADER:+ -H "$AUTH_HEADER"} \
+    -H "Content-Type: application/json" \
+    -d "{\"query\": \"$(gql_query $query_name)\", \"variables\": $variables}" \
+    "${GQL_ENDPOINT}"
+}
+
+# Run the given command in the background. Useful for starting a
+# node and then moving on with commands that exercise it for the
+# test.
+#
+# Ensures that BATS' handling of file handles is taken into account;
+# see
+# https://github.com/bats-core/bats-core#printing-to-the-terminal
+# https://github.com/sstephenson/bats/issues/80#issuecomment-174101686
+# for details.
+background() {
+  "$@" 3>- &
+  echo $!
+}
+
+# Taken from https://github.com/docker/swarm/blob/master/test/integration/helpers.bash
+# Retry a command $1 times until it succeeds. Wait $2 seconds between retries.
+retry() {
+  local attempts=$1
+  shift
+  local delay=$1
+  shift
+  local i
+
+  for ((i=0; i < attempts; i++)); do
+    run "$@"
+    if [[ "$status" -eq 0 ]] ; then
+      return 0
+    fi
+    sleep "$delay"
+  done
+
+  echo "Command \"$*\" failed $attempts times. Output: $output"
+  false
+}
+
diff --git a/cala-server/schema.graphql b/cala-server/schema.graphql
index 2e95db5a..f2d1378f 100644
--- a/cala-server/schema.graphql
+++ b/cala-server/schema.graphql
@@ -76,6 +76,10 @@ input ImportJobCreateInput {
   endpoint: String!
 }
 
+type ImportJobCreatePayload {
+  importJob: ImportJob!
+}
+
 """
 An edge in a connection.
 """
@@ -116,7 +120,7 @@ type JournalCreatePayload {
 }
 
 type Mutation {
   journalCreate(input: JournalCreateInput!): JournalCreatePayload!
-  importJobCreate(input: ImportJobCreateInput!): ImportJob!
+  importJobCreate(input: ImportJobCreateInput!): ImportJobCreatePayload!
 }
 
 """
diff --git a/cala-server/src/cli/mod.rs b/cala-server/src/cli/mod.rs
index 46c9971f..307473ef 100644
--- a/cala-server/src/cli/mod.rs
+++ b/cala-server/src/cli/mod.rs
@@ -1,8 +1,9 @@
 pub mod config;
 mod db;
 
+use anyhow::Context;
 use clap::Parser;
-use std::path::PathBuf;
+use std::{fs, path::PathBuf};
 
 use self::config::{Config, EnvOverride};
 
@@ -17,6 +18,13 @@ struct Cli {
         value_name = "FILE"
     )]
     config: PathBuf,
+    #[clap(
+        long,
+        env = "CALA_HOME",
+        default_value = ".cala",
+        value_name = "DIRECTORY"
+    )]
+    cala_home: String,
     #[clap(env = "PG_CON")]
     pg_con: String,
 }
@@ -26,14 +34,15 @@ pub async fn run() -> anyhow::Result<()> {
 
     let config = Config::from_path(cli.config, EnvOverride { db_con: cli.pg_con })?;
 
-    run_cmd(config).await?;
+    run_cmd(&cli.cala_home, config).await?;
 
     Ok(())
 }
 
-async fn run_cmd(config: Config) -> anyhow::Result<()> {
+async fn run_cmd(cala_home: &str, config: Config) -> anyhow::Result<()> {
     use cala_ledger::{CalaLedger, CalaLedgerConfig};
     cala_tracing::init_tracer(config.tracing)?;
+    store_server_pid(cala_home, std::process::id())?;
     let pool = db::init_pool(&config.db).await?;
     let ledger_config = CalaLedgerConfig::builder().pool(pool.clone()).build()?;
     let ledger = CalaLedger::init(ledger_config).await?;
@@ -41,3 +50,15 @@ async fn run_cmd(config: Config) -> anyhow::Result<()> {
     crate::server::run(config.server, app).await?;
     Ok(())
 }
+
+pub fn store_server_pid(cala_home: &str, pid: u32) -> anyhow::Result<()> {
+    create_cala_dir(cala_home)?;
+    let _ = fs::remove_file(format!("{cala_home}/server-pid"));
+    fs::write(format!("{cala_home}/server-pid"), pid.to_string()).context("Writing PID file")?;
+    Ok(())
+}
+
+fn create_cala_dir(bria_home: &str) -> anyhow::Result<()> {
+    let _ = fs::create_dir(bria_home);
+    Ok(())
+}
diff --git a/cala-server/src/graphql/import_job.rs b/cala-server/src/graphql/import_job.rs
index a3b26622..9b9223b5 100644
--- a/cala-server/src/graphql/import_job.rs
+++ b/cala-server/src/graphql/import_job.rs
@@ -20,7 +20,7 @@ pub struct ImportJob {
 
 #[derive(SimpleObject)]
 pub struct ImportJobCreatePayload {
-    pub journal: ImportJob,
+    pub import_job: ImportJob,
 }
 
 #[derive(Serialize, Deserialize)]
diff --git a/cala-server/src/graphql/schema.rs b/cala-server/src/graphql/schema.rs
index 2070596c..05ae5583 100644
--- a/cala-server/src/graphql/schema.rs
+++ b/cala-server/src/graphql/schema.rs
@@ -107,11 +107,13 @@ impl Mutation {
         &self,
         ctx: &Context<'_>,
         input: ImportJobCreateInput,
-    ) -> Result<ImportJob> {
+    ) -> Result<ImportJobCreatePayload> {
         let app = ctx.data_unchecked::<CalaApp>();
-        Ok(app
-            .create_import_job(input.name, input.description, input.endpoint)
-            .await
-            .map(ImportJob::from)?)
+        Ok(ImportJobCreatePayload {
+            import_job: app
+                .create_import_job(input.name, input.description, input.endpoint)
+                .await
+                .map(ImportJob::from)?,
+        })
     }
 }
diff --git a/cala-server/src/import_job/entity.rs b/cala-server/src/import_job/entity.rs
index a4bf4371..ecaeb437 100644
--- a/cala-server/src/import_job/entity.rs
+++ b/cala-server/src/import_job/entity.rs
@@ -71,15 +71,16 @@ pub struct NewImportJob {
 
 impl NewImportJob {
     pub fn builder() -> NewImportJobBuilder {
-        NewImportJobBuilder::default()
+        let mut builder = NewImportJobBuilder::default();
+        builder.id(ImportJobId::new());
+        builder
     }
 
     pub(super) fn initial_events(self) -> EntityEvents<ImportJobEvent> {
-        let id = ImportJobId::new();
         EntityEvents::init(
-            id,
+            self.id,
             [ImportJobEvent::Initialized {
-                id,
+                id: self.id,
                 name: self.name,
                 description: self.description,
                 import_config: self.import_config,
diff --git a/docker-compose.override.yml b/docker-compose.override.yml
deleted file mode 100644
index 3c288958..00000000
--- a/docker-compose.override.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-version: "3"
-services:
-  postgres:
-    ports:
-      - "5432:5432"
-  otel-agent:
-    ports:
-      - "4317:4317" # OpenTelemetry receiver
diff --git a/docker-compose.yml b/docker-compose.yml
index a994633e..2c68ac61 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -3,9 +3,10 @@ services:
   integration-deps:
     image: busybox
     depends_on:
-      - postgres
+      - server-pg
+      - examples-pg
      - otel-agent
-  postgres:
+  server-pg:
     image: postgres:14.1
     ports:
       - "5432:5432"
@@ -18,7 +19,22 @@ services:
       interval: 5s
       timeout: 5s
       retries: 5
+  examples-pg:
+    image: postgres:14.1
+    ports:
+      - "5433:5432"
+    environment:
+      - POSTGRES_USER=user
+      - POSTGRES_PASSWORD=password
+      - POSTGRES_DB=pg
+    healthcheck:
+      test: [ "CMD-SHELL", "pg_isready" ]
+      interval: 5s
+      timeout: 5s
+      retries: 5
   otel-agent:
+    ports:
+      - "4317:4317" # OpenTelemetry receiver
     image: otel/opentelemetry-collector-contrib:0.57.2
     command: [ "--config=/etc/otel-agent-config.yaml" ]
     environment:
diff --git a/examples/rust/Cargo.toml b/examples/rust/Cargo.toml
index 0c7c975a..da630495 100644
--- a/examples/rust/Cargo.toml
+++ b/examples/rust/Cargo.toml
@@ -4,10 +4,9 @@ version = "0.1.0"
 edition = "2021"
 workspace = "../../"
 
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
 [dependencies]
-anyhow = "1.0"
 cala-ledger = { path = "../../cala-ledger" }
-tokio = { version = "1.34", features = ["rt-multi-thread", "macros"] }
+
+anyhow = { workspace = true }
+tokio = { workspace = true }
 sqlx = { workspace = true }
diff --git a/examples/rust/src/main.rs b/examples/rust/src/main.rs
index 9802a6a6..32792772 100644
--- a/examples/rust/src/main.rs
+++ b/examples/rust/src/main.rs
@@ -2,8 +2,7 @@ use cala_ledger::{account::*, journal::*, migrate::IncludeMigrations, query::*,
 
 #[tokio::main]
 async fn main() -> anyhow::Result<()> {
-    let pg_host = std::env::var("PG_HOST").unwrap_or("localhost".to_string());
-    let pg_con = format!("postgres://user:password@{pg_host}:5432/pg");
+    let pg_con = format!("postgres://user:password@localhost:5433/pg");
     let pool = sqlx::postgres::PgPoolOptions::new()
         .max_connections(20)
         .connect(&pg_con)
@@ -15,6 +14,7 @@ async fn main() -> anyhow::Result<()> {
     let cala_config = CalaLedgerConfig::builder()
         .pool(pool)
         .exec_migrations(false)
+        .outbox(OutboxServerConfig::default())
         .build()?;
     let cala = CalaLedger::init(cala_config).await?;
     let new_account = NewAccount::builder()
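
Usage sketch (not part of the change itself): roughly how the new pieces fit together when run locally. It assumes the `clean-deps` and `start-deps` targets referenced by the new `e2e` rule bring up the compose services above, and it simply mirrors bats/gql/import-job-create.gql and bats/helpers.bash (GraphQL server on port 2252, example outbox endpoint on 2253, examples Postgres on 5433).

# Run the suite the way CI does: clean-deps + start-deps + build, then `bats -t bats`.
make e2e

# What the bats test exercises under the hood: register an import job over GraphQL ...
curl -s http://localhost:2252/graphql \
  -H "Content-Type: application/json" \
  -d '{"query": "mutation importJobCreate($input: ImportJobCreateInput!) { importJobCreate(input: $input) { importJob { importJobId name } } }", "variables": {"input": {"name": "rust-example", "endpoint": "http://localhost:2253"}}}' \
  | jq -r '.data.importJobCreate.importJob.name'   # prints "rust-example"

# ... then run the Rust example against the examples Postgres (localhost:5433) so the
# server's import job can pull events from its outbox endpoint. Note the payload change:
# the mutation result is now wrapped in ImportJobCreatePayload, hence the
# .data.importJobCreate.importJob path above.
cargo run --bin cala-ledger-example-rust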