diff --git a/backend/Cargo.lock b/backend/Cargo.lock index 32be28ed67338..99989b1f52150 100644 --- a/backend/Cargo.lock +++ b/backend/Cargo.lock @@ -455,18 +455,6 @@ dependencies = [ "futures-core", ] -[[package]] -name = "async-channel" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" -dependencies = [ - "concurrent-queue", - "event-listener-strategy", - "futures-core", - "pin-project-lite", -] - [[package]] name = "async-compression" version = "0.3.15" @@ -503,64 +491,6 @@ dependencies = [ "zstd-safe 7.2.1", ] -[[package]] -name = "async-executor" -version = "1.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" -dependencies = [ - "async-task", - "concurrent-queue", - "fastrand 2.2.0", - "futures-lite 2.5.0", - "slab", -] - -[[package]] -name = "async-global-executor" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" -dependencies = [ - "async-channel 2.3.1", - "async-executor", - "async-io", - "async-lock", - "blocking", - "futures-lite 2.5.0", - "once_cell", -] - -[[package]] -name = "async-io" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" -dependencies = [ - "async-lock", - "cfg-if", - "concurrent-queue", - "futures-io", - "futures-lite 2.5.0", - "parking", - "polling", - "rustix", - "slab", - "tracing", - "windows-sys 0.59.0", -] - -[[package]] -name = "async-lock" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" -dependencies = [ - "event-listener 5.3.1", - "event-listener-strategy", - "pin-project-lite", -] - [[package]] name = "async-oauth2" version = "0.5.0" @@ -591,32 +521,6 @@ dependencies = [ "syn 2.0.89", ] -[[package]] -name = "async-std" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" -dependencies = [ - "async-channel 1.9.0", - "async-global-executor", - "async-io", - "async-lock", - "crossbeam-utils", - "futures-channel", - "futures-core", - "futures-io", - "futures-lite 2.5.0", - "gloo-timers", - "kv-log-macro", - "log", - "memchr", - "once_cell", - "pin-project-lite", - "pin-utils", - "slab", - "wasm-bindgen-futures", -] - [[package]] name = "async-stream" version = "0.3.6" @@ -664,12 +568,6 @@ dependencies = [ "uuid 0.8.2", ] -[[package]] -name = "async-task" -version = "4.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" - [[package]] name = "async-trait" version = "0.1.83" @@ -1167,18 +1065,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" -[[package]] -name = "bb8" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d89aabfae550a5c44b43ab941844ffcd2e993cb6900b342debf59e9ea74acdb8" -dependencies = [ - "async-trait", - "futures-util", - "parking_lot", - "tokio", -] - [[package]] name = "better_scoped_tls" version = "0.1.2" @@ -1359,19 +1245,6 @@ 
version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" -[[package]] -name = "blocking" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" -dependencies = [ - "async-channel 2.3.1", - "async-task", - "futures-io", - "futures-lite 2.5.0", - "piper", -] - [[package]] name = "borsh" version = "1.5.3" @@ -1778,20 +1651,6 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" -[[package]] -name = "combine" -version = "4.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" -dependencies = [ - "bytes", - "futures-core", - "memchr", - "pin-project-lite", - "tokio", - "tokio-util", -] - [[package]] name = "comfy-table" version = "7.1.3" @@ -2727,7 +2586,7 @@ dependencies = [ "pin-project", "rustls-tokio-stream", "serde", - "socket2 0.5.7", + "socket2", "tokio", "trust-dns-proto", "trust-dns-resolver", @@ -3302,16 +3161,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "event-listener-strategy" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" -dependencies = [ - "event-listener 5.3.1", - "pin-project-lite", -] - [[package]] name = "fallible-iterator" version = "0.2.0" @@ -3614,19 +3463,6 @@ dependencies = [ "waker-fn", ] -[[package]] -name = "futures-lite" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" -dependencies = [ - "fastrand 2.2.0", - "futures-core", - "futures-io", - "parking", - "pin-project-lite", -] - [[package]] name = "futures-macro" version = "0.3.31" @@ -4072,12 +3908,6 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" -[[package]] -name = "hermit-abi" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" - [[package]] name = "hex" version = "0.4.3" @@ -4222,9 +4052,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e9b187a72d63adbfba487f48095306ac823049cb504ee195541e91c7775f5ad" dependencies = [ "anyhow", - "async-channel 1.9.0", + "async-channel", "base64 0.13.1", - "futures-lite 1.13.0", + "futures-lite", "http 0.2.12", "infer", "pin-project-lite", @@ -4271,7 +4101,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower-service", "tracing", @@ -4375,7 +4205,7 @@ dependencies = [ "http-body 1.0.1", "hyper 1.5.1", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower 0.4.13", "tower-service", @@ -4631,7 +4461,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.7", + "socket2", "widestring", "windows-sys 0.48.0", "winreg", @@ -4759,15 +4589,6 @@ version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a4933f3f57a8e9d9da04db23fb153356ecaf00cbd14aee46279c33dc80925c37" -[[package]] -name = "kv-log-macro" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" -dependencies = [ - "log", -] - [[package]] name = "lalrpop-util" version = "0.20.2" @@ -4971,9 +4792,6 @@ name = "log" version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" -dependencies = [ - "value-bag", -] [[package]] name = "loki-api" @@ -5374,7 +5192,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", - "socket2 0.5.7", + "socket2", "thiserror 1.0.69", "tokio", "tokio-native-tls", @@ -5582,7 +5400,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi", "libc", ] @@ -6121,17 +5939,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "piper" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" -dependencies = [ - "atomic-waker", - "fastrand 2.2.0", - "futures-io", -] - [[package]] name = "pkcs1" version = "0.4.1" @@ -6181,21 +5988,6 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" -[[package]] -name = "polling" -version = "3.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" -dependencies = [ - "cfg-if", - "concurrent-queue", - "hermit-abi 0.4.0", - "pin-project-lite", - "rustix", - "tracing", - "windows-sys 0.59.0", -] - [[package]] name = "portable-atomic" version = "1.9.0" @@ -6600,7 +6392,7 @@ dependencies = [ "quinn-udp", "rustc-hash 2.0.0", "rustls 0.23.18", - "socket2 0.5.7", + "socket2", "thiserror 2.0.3", "tokio", "tracing", @@ -6635,7 +6427,7 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.5.7", + "socket2", "tracing", "windows-sys 0.59.0", ] @@ -6655,12 +6447,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" -[[package]] -name = "radix_fmt" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce082a9940a7ace2ad4a8b7d0b1eac6aa378895f18be598230c5f2284ac05426" - [[package]] name = "rand" version = "0.7.3" @@ -6820,28 +6606,6 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03251193000f4bd3b042892be858ee50e8b3719f2b08e5833ac4353724632430" -[[package]] -name = "redis" -version = "0.23.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f49cdc0bb3f412bf8e7d1bd90fe1d9eb10bc5c399ba90973c14662a27b3f8ba" -dependencies = [ - "async-std", - "async-trait", - "bytes", - "combine", - "futures-util", - "itoa", - "percent-encoding", - "pin-project-lite", - "ryu", - "sha1_smol", - "socket2 0.4.10", - "tokio", - "tokio-util", - "url", -] - [[package]] name = "redox_syscall" version = "0.3.5" @@ -7169,21 +6933,6 @@ dependencies = [ "zeroize", ] 
-[[package]] -name = "rsmq_async" -version = "5.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b21409a26e25b1946ce3ab9903a2710fbae8727a05c17f79d165908592743cb" -dependencies = [ - "async-trait", - "bb8", - "lazy_static", - "radix_fmt", - "rand 0.8.5", - "redis", - "thiserror 1.0.69", -] - [[package]] name = "rust-embed" version = "6.8.1" @@ -7407,7 +7156,7 @@ checksum = "22557157d7395bc30727745b365d923f1ecc230c4c80b176545f3f4f08c46e33" dependencies = [ "futures", "rustls 0.23.18", - "socket2 0.5.7", + "socket2", "tokio", ] @@ -7915,12 +7664,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "sha1_smol" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" - [[package]] name = "sha2" version = "0.9.9" @@ -8123,16 +7866,6 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.7" @@ -9516,7 +9249,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2", "tokio-macros", "tracing", "windows-sys 0.48.0", @@ -9563,7 +9296,7 @@ dependencies = [ "postgres-protocol", "postgres-types", "rand 0.8.5", - "socket2 0.5.7", + "socket2", "tokio", "tokio-util", "whoami", @@ -10420,12 +10153,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" -[[package]] -name = "value-bag" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2" - [[package]] name = "vcpkg" version = "0.2.15" @@ -10727,7 +10454,6 @@ dependencies = [ "quote", "rand 0.8.5", "reqwest 0.12.9", - "rsmq_async", "serde", "serde_json", "sha2 0.10.8", @@ -10798,7 +10524,6 @@ dependencies = [ "regex", "reqwest 0.12.9", "rsa 0.7.2", - "rsmq_async", "rust-embed", "samael", "serde", @@ -10871,7 +10596,6 @@ name = "windmill-autoscaling" version = "1.430.1" dependencies = [ "anyhow", - "rsmq_async", "serde", "serde_json", "sqlx", @@ -10935,7 +10659,6 @@ name = "windmill-git-sync" version = "1.430.1" dependencies = [ "regex", - "rsmq_async", "serde", "serde_json", "sqlx", @@ -11164,7 +10887,6 @@ dependencies = [ "prometheus", "regex", "reqwest 0.12.9", - "rsmq_async", "serde", "serde_json", "serde_urlencoded", @@ -11231,7 +10953,6 @@ dependencies = [ "rand 0.8.5", "regex", "reqwest 0.12.9", - "rsmq_async", "rust_decimal", "serde", "serde_json", diff --git a/backend/Cargo.toml b/backend/Cargo.toml index e6d14463af7cb..b5f126edf4cef 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -83,7 +83,6 @@ chrono.workspace = true git-version.workspace = true base64.workspace = true sha2.workspace = true -rsmq_async.workspace = true url.workspace = true lazy_static.workspace = true once_cell.workspace = true @@ -231,7 +230,6 @@ async-stripe = { version = "0.39.1", features = [ ] } async_zip = { version = "0.0.11", features = ["full"] } once_cell = "1.17.1" -rsmq_async = { version = "5.1.5" } gosyn = "0.2.6" bytes = "1.4.0" gethostname = "0.4.3" 
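The manifest changes above drop `rsmq_async` from the workspace, and the Cargo.lock hunks remove its now-orphaned transitive tree (`redis`, `bb8`, `combine`, `radix_fmt`, `async-std` and friends, plus the duplicate `socket2 0.4` and `hermit-abi 0.4` entries). The backend/src/main.rs hunk below deletes the only consumer: the `REDIS_URL` parsing that built the rsmq connection options. For reference, here is that deleted logic as a standalone sketch; `RedisConfig` is a stand-in for `rsmq_async::RsmqOptions`, with fields and defaults mirroring the removed lines:

```rust
// Sketch of the REDIS_URL parsing deleted from backend/src/main.rs below.
// RedisConfig is a placeholder for rsmq_async::RsmqOptions.
use url::Url;

#[derive(Debug, Default)]
pub struct RedisConfig {
    pub host: String,
    pub password: Option<String>,
    pub db: u8,
    pub ns: String,
    pub port: String,
}

pub fn redis_config_from_env() -> Option<RedisConfig> {
    std::env::var("REDIS_URL").ok().map(|x| {
        let url = x.parse::<Url>().expect("REDIS_URL must be a valid URL");
        let mut config = RedisConfig::default();
        config.host = url.host_str().expect("redis host required").to_owned();
        config.password = url.password().map(|s| s.to_owned());
        // The first path segment selects the Redis database index, default 0.
        config.db = url
            .path_segments()
            .and_then(|mut segments| segments.next())
            .and_then(|segment| segment.parse().ok())
            .unwrap_or(0);
        // The queue namespace comes from ?rsmq_namespace=..., default "rsmq".
        config.ns = url
            .query_pairs()
            .find(|s| s.0 == "rsmq_namespace")
            .map(|s| s.1.into_owned())
            .unwrap_or_else(|| "rsmq".to_string());
        // Port falls back to the Redis default of 6379.
        config.port = url.port().unwrap_or(6379).to_string();
        config
    })
}
```

So `redis://:pw@example.com:6380/2?rsmq_namespace=jobs` yielded host `example.com`, db `2`, namespace `jobs` and port `6380`; with the Redis-backed queue gone, Postgres is the only queue backend and this configuration surface disappears.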
diff --git a/backend/ee-repo-ref.txt b/backend/ee-repo-ref.txt
index 083da864f7823..f5b5a34d5ead6 100644
--- a/backend/ee-repo-ref.txt
+++ b/backend/ee-repo-ref.txt
@@ -1 +1 @@
-4a0e64ab860eb88ff0d21b8705e647c9b76c46ee
+446e4fbc59048bb11c18648ac094e57c0bfa4a28
\ No newline at end of file
diff --git a/backend/src/main.rs b/backend/src/main.rs
index eb6c423e0e171..23c0453a9623e 100644
--- a/backend/src/main.rs
+++ b/backend/src/main.rs
@@ -8,7 +8,8 @@ use anyhow::Context;
 use monitor::{
-    reload_indexer_config, reload_timeout_wait_result_setting, send_current_log_file_to_object_store, send_logs_to_object_store
+    reload_indexer_config, reload_timeout_wait_result_setting,
+    send_current_log_file_to_object_store, send_logs_to_object_store,
 };
 use rand::Rng;
 use sqlx::{postgres::PgListener, Pool, Postgres};
@@ -29,7 +30,15 @@ use windmill_common::ee::{maybe_renew_license_key_on_start, LICENSE_KEY_ID, LICE
 use windmill_common::{
     global_settings::{
-        BASE_URL_SETTING, BUNFIG_INSTALL_SCOPES_SETTING,CRITICAL_ALERT_MUTE_UI_SETTING, CRITICAL_ERROR_CHANNELS_SETTING, CUSTOM_TAGS_SETTING, DEFAULT_TAGS_PER_WORKSPACE_SETTING, DEFAULT_TAGS_WORKSPACES_SETTING, ENV_SETTINGS, EXPOSE_DEBUG_METRICS_SETTING, EXPOSE_METRICS_SETTING, EXTRA_PIP_INDEX_URL_SETTING, HUB_BASE_URL_SETTING, INDEXER_SETTING, JOB_DEFAULT_TIMEOUT_SECS_SETTING, JWT_SECRET_SETTING, KEEP_JOB_DIR_SETTING, LICENSE_KEY_SETTING, NPM_CONFIG_REGISTRY_SETTING, OAUTH_SETTING, PIP_INDEX_URL_SETTING, REQUEST_SIZE_LIMIT_SETTING, REQUIRE_PREEXISTING_USER_FOR_OAUTH_SETTING, RETENTION_PERIOD_SECS_SETTING, SAML_METADATA_SETTING, SCIM_TOKEN_SETTING, SMTP_SETTING, TIMEOUT_WAIT_RESULT_SETTING
+        BASE_URL_SETTING, BUNFIG_INSTALL_SCOPES_SETTING, CRITICAL_ALERT_MUTE_UI_SETTING,
+        CRITICAL_ERROR_CHANNELS_SETTING, CUSTOM_TAGS_SETTING, DEFAULT_TAGS_PER_WORKSPACE_SETTING,
+        DEFAULT_TAGS_WORKSPACES_SETTING, ENV_SETTINGS, EXPOSE_DEBUG_METRICS_SETTING,
+        EXPOSE_METRICS_SETTING, EXTRA_PIP_INDEX_URL_SETTING, HUB_BASE_URL_SETTING, INDEXER_SETTING,
+        JOB_DEFAULT_TIMEOUT_SECS_SETTING, JWT_SECRET_SETTING, KEEP_JOB_DIR_SETTING,
+        LICENSE_KEY_SETTING, NPM_CONFIG_REGISTRY_SETTING, OAUTH_SETTING, PIP_INDEX_URL_SETTING,
+        REQUEST_SIZE_LIMIT_SETTING, REQUIRE_PREEXISTING_USER_FOR_OAUTH_SETTING,
+        RETENTION_PERIOD_SECS_SETTING, SAML_METADATA_SETTING, SCIM_TOKEN_SETTING, SMTP_SETTING,
+        TIMEOUT_WAIT_RESULT_SETTING,
     },
     scripts::ScriptLang,
     stats_ee::schedule_stats,
@@ -332,27 +341,6 @@ async fn windmill_main() -> anyhow::Result<()> {
         IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))
     };
 
-    let rsmq_config = std::env::var("REDIS_URL").ok().map(|x| {
-        let url = x.parse::<Url>().unwrap();
-        let mut config = rsmq_async::RsmqOptions { ..Default::default() };
-
-        config.host = url.host_str().expect("redis host required").to_owned();
-        config.password = url.password().map(|s| s.to_owned());
-        config.db = url
-            .path_segments()
-            .and_then(|mut segments| segments.next())
-            .and_then(|segment| segment.parse().ok())
-            .unwrap_or(0);
-        config.ns = url
-            .query_pairs()
-            .find(|s| s.0 == "rsmq_namespace")
-            .map(|s| s.1)
-            .unwrap_or(std::borrow::Cow::Borrowed("rsmq"))
-            .into_owned();
-        config.port = url.port().unwrap_or(6379).to_string();
-        config
-    });
-
     tracing::info!("Connecting to database...");
     let db = windmill_common::connect_db(server_mode, indexer_mode).await?;
     tracing::info!("Database connected");
@@ -367,13 +355,6 @@ async fn windmill_main() -> anyhow::Result<()> {
             .unwrap_or_else(|| "UNKNOWN".to_string())
     );
 
-    let rsmq = if let Some(config) = rsmq_config {
-        tracing::info!("Redis config set: {:?}", config);
-        Some(rsmq_async::MultiplexedRsmq::new(config).await.unwrap())
-    } else {
-        None
-    };
-
     let is_agent = mode == Mode::Agent;
 
     if !is_agent {
@@ -488,7 +469,6 @@ Windmill Community Edition {GIT_VERSION}
                 monitor_db(
                     &db,
                     &base_internal_url,
-                    rsmq.clone(),
                     server_mode,
                     worker_mode,
                     true,
@@ -507,7 +487,6 @@ Windmill Community Edition {GIT_VERSION}
     let addr = SocketAddr::from((server_bind_address, port));
 
-    let rsmq2 = rsmq.clone();
    let (base_internal_tx, base_internal_rx) = tokio::sync::oneshot::channel::<String>();
 
     DirBuilder::new()
@@ -588,7 +567,6 @@ Windmill Community Edition {GIT_VERSION}
             if !is_agent {
                 windmill_api::run_server(
                     db.clone(),
-                    rsmq2,
                     index_reader,
                     log_index_reader,
                     addr,
@@ -620,7 +598,6 @@ Windmill Community Edition {GIT_VERSION}
                 killpill_tx.clone(),
                 num_workers,
                 base_internal_url.clone(),
-                rsmq.clone(),
                 mode.clone() == Mode::Agent,
                 hostname.clone(),
             )
@@ -636,13 +613,12 @@ Windmill Community Edition {GIT_VERSION}
             killpill_phase2_tx.send(())?;
             tracing::info!("Phase 2 of shutdown completed");
         }
-        Ok(()) as anyhow::Result<()>
+        Ok(())
     };
 
     let monitor_f = async {
         let db = db.clone();
         let tx = killpill_tx.clone();
-        let rsmq = rsmq.clone();
         let base_internal_url = base_internal_url.to_string();
 
         let h = tokio::spawn(async move {
@@ -659,7 +635,6 @@ Windmill Community Edition {GIT_VERSION}
                     monitor_db(
                         &db,
                         &base_internal_url,
-                        rsmq.clone(),
                         server_mode,
                         worker_mode,
                         false,
@@ -950,13 +925,12 @@ fn display_config(envs: &[&str]) {
     )
 }
 
-pub async fn run_workers<R: rsmq_async::RsmqConnection + Send + Sync + Clone + 'static>(
+pub async fn run_workers(
     db: Pool<Postgres>,
     mut rx: tokio::sync::broadcast::Receiver<()>,
     tx: tokio::sync::broadcast::Sender<()>,
     num_workers: i32,
     base_internal_url: String,
-    rsmq: Option<R>,
     agent_mode: bool,
     hostname: String,
 ) -> anyhow::Result<()> {
@@ -1028,7 +1002,6 @@ pub async fn run_workers
diff --git a/backend/src/monitor.rs b/backend/src/monitor.rs
--- a/backend/src/monitor.rs
+++ b/backend/src/monitor.rs
                 {
                     tracing::error!("Error deleting log file: {:?}", e);
                 }
-                if let Err(e) = sqlx::query!(
-                    "DELETE FROM job WHERE id = ANY($1)",
-                    &deleted_jobs
-                )
-                .execute(&mut *tx)
-                .await
+                if let Err(e) =
+                    sqlx::query!("DELETE FROM job WHERE id = ANY($1)", &deleted_jobs)
+                        .execute(&mut *tx)
+                        .await
                 {
                     tracing::error!("Error deleting job: {:?}", e);
                 }
@@ -1031,7 +1028,6 @@ pub async fn monitor_pool(db: &DB) {
 pub async fn monitor_db(
     db: &Pool<Postgres>,
     base_internal_url: &str,
-    rsmq: Option<rsmq_async::MultiplexedRsmq>,
     server_mode: bool,
     _worker_mode: bool,
     initial_load: bool,
@@ -1039,8 +1035,8 @@ pub async fn monitor_db(
 ) {
     let zombie_jobs_f = async {
         if server_mode && !initial_load {
-            handle_zombie_jobs(db, base_internal_url, rsmq.clone(), "server").await;
-            match handle_zombie_flows(db, rsmq.clone()).await {
+            handle_zombie_jobs(db, base_internal_url, "server").await;
+            match handle_zombie_flows(db).await {
                 Err(err) => {
                     tracing::error!("Error handling zombie flows: {:?}", err);
                 }
@@ -1312,12 +1308,7 @@ pub async fn reload_base_url_setting(db: &DB) -> error::Result<()> {
     Ok(())
 }
 
-async fn handle_zombie_jobs(
-    db: &Pool<Postgres>,
-    base_internal_url: &str,
-    rsmq: Option<rsmq_async::MultiplexedRsmq>,
-    worker_name: &str,
-) {
+async fn handle_zombie_jobs(db: &Pool<Postgres>, base_internal_url: &str, worker_name: &str) {
     if *RESTART_ZOMBIE_JOBS {
         let restarted = sqlx::query!(
             "UPDATE queue SET running = false, started_at = null
@@ -1426,7 +1417,6 @@ async fn handle_zombie_jobs
                 true,
                 same_worker_tx_never_used,
                 "",
-                rsmq.clone(),
                 worker_name,
                 send_result_never_used,
                 #[cfg(feature = "benchmark")]
@@ -1436,10 +1426,7 @@ async fn handle_zombie_jobs
     }
 }
 
-async fn handle_zombie_flows(
-    db: &DB,
-    rsmq: Option<rsmq_async::MultiplexedRsmq>,
-) -> error::Result<()> {
+async fn handle_zombie_flows(db: &DB) -> error::Result<()> {
     let flows =
sqlx::query_as::<_, QueuedJob>( r#" SELECT * @@ -1486,7 +1473,7 @@ async fn handle_zombie_flows( } ); report_critical_error(reason.clone(), db.clone(), Some(&flow.workspace_id), None).await; - cancel_zombie_flow_job(db, flow, &rsmq, reason).await?; + cancel_zombie_flow_job(db, flow, reason).await?; } } @@ -1516,7 +1503,7 @@ async fn handle_zombie_flows( job.workspace_id, flow.last_ping ); - cancel_zombie_flow_job(db, job, &rsmq, + cancel_zombie_flow_job(db, job, format!("Flow {} cancelled as one of the parallel branch {} was unable to make the last transition ", flow.parent_flow_id, flow.job_id)) .await?; } else { @@ -1529,7 +1516,6 @@ async fn handle_zombie_flows( async fn cancel_zombie_flow_job( db: &Pool, flow: QueuedJob, - rsmq: &Option, message: String, ) -> Result<(), error::Error> { let tx = db.begin().await.unwrap(); @@ -1545,7 +1531,6 @@ async fn cancel_zombie_flow_job( flow.workspace_id.as_str(), tx, db, - rsmq.clone(), true, false, ) diff --git a/backend/tests/worker.rs b/backend/tests/worker.rs index bfb3ef583dc00..88ac603c19079 100644 --- a/backend/tests/worker.rs +++ b/backend/tests/worker.rs @@ -126,7 +126,6 @@ impl ApiServer { db.clone(), None, None, - None, addr, rx, port_tx, @@ -899,8 +898,8 @@ impl RunJob { hm_args.insert(k, windmill_common::worker::to_raw_value(&v)); } - let tx = PushIsolationLevel::IsolatedRoot(db.clone(), None); - let (uuid, tx) = windmill_queue::push::( + let tx = PushIsolationLevel::IsolatedRoot(db.clone()); + let (uuid, tx) = windmill_queue::push( &db, tx, "test-workspace", @@ -1018,7 +1017,7 @@ fn spawn_test_worker( windmill_common::worker::make_suspended_pull_query(&wc).await; windmill_common::worker::make_pull_query(&wc).await; } - windmill_worker::run_worker::( + windmill_worker::run_worker( &db, worker_instance, worker_name, @@ -1028,7 +1027,6 @@ fn spawn_test_worker( rx, tx2, &base_internal_url, - None, false, ) .await diff --git a/backend/windmill-api/Cargo.toml b/backend/windmill-api/Cargo.toml index 02a2de9265c07..f05c2983c8954 100644 --- a/backend/windmill-api/Cargo.toml +++ b/backend/windmill-api/Cargo.toml @@ -77,7 +77,6 @@ async-stripe = { workspace = true, optional = true } lazy_static.workspace = true prometheus = { workspace = true, optional = true } async_zip.workspace = true -rsmq_async.workspace = true regex.workspace = true bytes.workspace = true samael = { workspace = true, optional = true } diff --git a/backend/windmill-api/src/apps.rs b/backend/windmill-api/src/apps.rs index 1b6ce7fb7681c..5e236baeb6c86 100644 --- a/backend/windmill-api/src/apps.rs +++ b/backend/windmill-api/src/apps.rs @@ -64,7 +64,7 @@ use windmill_common::{ }; use windmill_git_sync::{handle_deployment_metadata, DeployedObject}; -use windmill_queue::{push, PushArgs, PushArgsOwned, PushIsolationLevel, QueueTransaction}; +use windmill_queue::{push, PushArgs, PushArgsOwned, PushIsolationLevel}; pub fn workspaced_service() -> Router { Router::new() @@ -651,12 +651,11 @@ async fn create_app( authed: ApiAuthed, Extension(user_db): Extension, Extension(db): Extension, - Extension(rsmq): Extension>, Extension(webhook): Extension, Path(w_id): Path, Json(mut app): Json, ) -> Result<(StatusCode, String)> { - let mut tx: QueueTransaction<'_, _> = (rsmq, user_db.clone().begin(&authed).await?).into(); + let mut tx = user_db.clone().begin(&authed).await?; app.policy.on_behalf_of = Some(username_to_permissioned_as(&authed.username)); app.policy.on_behalf_of_email = Some(authed.email.clone()); @@ -670,7 +669,7 @@ async fn create_app( &app.path, w_id ) - .fetch_one(&mut tx) 
+ .fetch_one(&mut *tx) .await? .unwrap_or(false); @@ -686,7 +685,7 @@ async fn create_app( &app.path, &w_id ) - .execute(&mut tx) + .execute(&mut *tx) .await?; let id = sqlx::query_scalar!( @@ -699,7 +698,7 @@ async fn create_app( json!(app.policy), app.draft_only, ) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await?; let v_id = sqlx::query_scalar!( @@ -711,7 +710,7 @@ async fn create_app( serde_json::to_string(&app.value).unwrap(), authed.username, ) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await?; sqlx::query!( @@ -719,11 +718,11 @@ async fn create_app( v_id, id ) - .execute(&mut tx) + .execute(&mut *tx) .await?; audit_log( - &mut tx, + &mut *tx, &authed, "apps.create", ActionKind::Create, @@ -809,7 +808,6 @@ async fn delete_app( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Extension(webhook): Extension, Path((w_id, path)): Path<(String, StripPath)>, ) -> Result { @@ -862,7 +860,6 @@ async fn delete_app( version: 0, // dummy version as it will not get inserted in db }, Some(format!("App '{}' deleted", path)), - rsmq, true, ) .await?; @@ -893,7 +890,6 @@ async fn update_app( Extension(db): Extension, Extension(user_db): Extension, Extension(webhook): Extension, - Extension(rsmq): Extension>, Path((w_id, path)): Path<(String, StripPath)>, Json(ns): Json, ) -> Result { @@ -901,7 +897,7 @@ async fn update_app( let path = path.to_path(); - let mut tx: QueueTransaction<'_, _> = (rsmq, user_db.clone().begin(&authed).await?).into(); + let mut tx = user_db.clone().begin(&authed).await?; let npath = if ns.policy.is_some() || ns.path.is_some() || ns.summary.is_some() { let mut sqlb = SqlBuilder::update_table("app"); @@ -918,7 +914,7 @@ async fn update_app( npath, w_id ) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await? .unwrap_or(false); @@ -950,7 +946,7 @@ async fn update_app( sqlb.returning("path"); let sql = sqlb.sql().map_err(|e| Error::InternalErr(e.to_string()))?; - let npath_o: Option = sqlx::query_scalar(&sql).fetch_optional(&mut tx).await?; + let npath_o: Option = sqlx::query_scalar(&sql).fetch_optional(&mut *tx).await?; not_found_if_none(npath_o, "App", path)? 
} else { path.to_owned() @@ -961,7 +957,7 @@ async fn update_app( npath, w_id ) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await?; let v_id = sqlx::query_scalar!( @@ -973,7 +969,7 @@ async fn update_app( serde_json::to_string(&nvalue).unwrap(), authed.username, ) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await?; sqlx::query!( @@ -982,7 +978,7 @@ async fn update_app( npath, w_id ) - .execute(&mut tx) + .execute(&mut *tx) .await?; v_id } else { @@ -991,7 +987,7 @@ async fn update_app( npath, w_id ) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await?; if let Some(v_id) = v_id { v_id @@ -1008,11 +1004,11 @@ async fn update_app( path, &w_id ) - .execute(&mut tx) + .execute(&mut *tx) .await?; audit_log( - &mut tx, + &mut *tx, &authed, "apps.update", ActionKind::Update, @@ -1022,8 +1018,7 @@ async fn update_app( ) .await?; - let tx: PushIsolationLevel<'_, rsmq_async::MultiplexedRsmq> = - PushIsolationLevel::Transaction(tx); + let tx = PushIsolationLevel::Transaction(tx); let mut args: HashMap> = HashMap::new(); if let Some(dm) = ns.deployment_message { args.insert("deployment_message".to_string(), to_raw_value(&dm)); @@ -1138,7 +1133,6 @@ async fn execute_component( OptAuthed(opt_authed): OptAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path((w_id, path)): Path<(String, StripPath)>, Json(payload): Json, ) -> Result { @@ -1252,7 +1246,7 @@ async fn execute_component( } _ => unreachable!(), }; - let tx = windmill_queue::PushIsolationLevel::IsolatedRoot(db.clone(), rsmq); + let tx = windmill_queue::PushIsolationLevel::IsolatedRoot(db.clone()); let (uuid, tx) = push( &db, diff --git a/backend/windmill-api/src/drafts.rs b/backend/windmill-api/src/drafts.rs index a91d00d2fa3a3..6ba1f4452fc4c 100644 --- a/backend/windmill-api/src/drafts.rs +++ b/backend/windmill-api/src/drafts.rs @@ -128,7 +128,7 @@ async fn delete_draft( // path, // w_id // ) -// .fetch_optional(&mut tx) +// .fetch_optional(&mut *tx) // .await?; // tx.commit().await?; diff --git a/backend/windmill-api/src/flows.rs b/backend/windmill-api/src/flows.rs index f02f91dc41262..66560ac37b3ed 100644 --- a/backend/windmill-api/src/flows.rs +++ b/backend/windmill-api/src/flows.rs @@ -46,7 +46,7 @@ use windmill_common::{ utils::{http_get_from_hub, not_found_if_none, paginate, Pagination, StripPath}, }; use windmill_git_sync::{handle_deployment_metadata, DeployedObject}; -use windmill_queue::{push, schedule::push_scheduled_job, PushIsolationLevel, QueueTransaction}; +use windmill_queue::{push, schedule::push_scheduled_job, PushIsolationLevel}; pub fn workspaced_service() -> Router { Router::new() @@ -327,7 +327,6 @@ async fn create_flow( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Extension(webhook): Extension, Path(w_id): Path, Json(nf): Json, @@ -348,10 +347,10 @@ async fn create_flow( // cron::Schedule::from_str(&ns.schedule).map_err(|e| error::Error::BadRequest(e.to_string()))?; let authed = maybe_refresh_folders(&nf.path, &w_id, authed, &db).await; - let mut tx: QueueTransaction<'_, _> = (rsmq, user_db.clone().begin(&authed).await?).into(); + let mut tx = user_db.clone().begin(&authed).await?; - check_path_conflict(tx.transaction_mut(), &w_id, &nf.path).await?; - check_schedule_conflict(tx.transaction_mut(), &w_id, &nf.path).await?; + check_path_conflict(&mut tx, &w_id, &nf.path).await?; + check_schedule_conflict(&mut tx, &w_id, &nf.path).await?; let schema_str = nf.schema.and_then(|x| serde_json::to_string(&x.0).ok()); @@ 
-371,7 +370,7 @@ async fn create_flow( schema_str, &authed.username, ) - .execute(&mut tx) + .execute(&mut *tx) .await?; let version = sqlx::query_scalar!( @@ -384,7 +383,7 @@ async fn create_flow( schema_str, &authed.username, ) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await?; sqlx::query!( @@ -392,18 +391,18 @@ async fn create_flow( version, nf.path, w_id - ).execute(&mut tx).await?; + ).execute(&mut *tx).await?; sqlx::query!( "DELETE FROM draft WHERE path = $1 AND workspace_id = $2 AND typ = 'flow'", nf.path, &w_id ) - .execute(&mut tx) + .execute(&mut *tx) .await?; audit_log( - &mut tx, + &mut *tx, &authed, "flows.create", ActionKind::Create, @@ -460,7 +459,7 @@ async fn create_flow( nf.path, w_id ) - .execute(&mut new_tx) + .execute(&mut *new_tx) .await?; new_tx.commit().await?; @@ -638,7 +637,6 @@ async fn update_flow_history( async fn update_flow( authed: ApiAuthed, Extension(user_db): Extension, - Extension(rsmq): Extension>, Extension(db): Extension, Extension(webhook): Extension, Path((w_id, flow_path)): Path<(String, StripPath)>, @@ -660,9 +658,9 @@ async fn update_flow( let flow_path = flow_path.to_path(); let authed = maybe_refresh_folders(&flow_path, &w_id, authed, &db).await; - let mut tx: QueueTransaction<'_, _> = (rsmq, user_db.clone().begin(&authed).await?).into(); + let mut tx = user_db.clone().begin(&authed).await?; - check_schedule_conflict(tx.transaction_mut(), &w_id, flow_path).await?; + check_schedule_conflict(&mut tx, &w_id, flow_path).await?; let schema = nf.schema.map(|x| x.0); let old_dep_job = sqlx::query_scalar!( @@ -670,7 +668,7 @@ async fn update_flow( flow_path, w_id ) - .fetch_optional(&mut tx) + .fetch_optional(&mut *tx) .await?; let old_dep_job = not_found_if_none(old_dep_job, "Flow", flow_path)?; @@ -695,7 +693,7 @@ async fn update_flow( flow_path, w_id, ) - .execute(&mut tx) + .execute(&mut *tx) .await.map_err(|e| error::Error::InternalErr(format!("Error updating flow due to flow update: {e:#}")))?; if is_new_path { @@ -710,7 +708,7 @@ async fn update_flow( flow_path, w_id ) - .execute(&mut tx) + .execute(&mut *tx) .await .map_err(|e| { error::Error::InternalErr(format!("Error updating flow due to create new flow: {e:#}")) @@ -722,7 +720,7 @@ async fn update_flow( flow_path, w_id ) - .execute(&mut tx) + .execute(&mut *tx) .await .map_err(|e| { error::Error::InternalErr(format!( @@ -735,7 +733,7 @@ async fn update_flow( flow_path, w_id ) - .execute(&mut tx) + .execute(&mut *tx) .await .map_err(|e| { error::Error::InternalErr(format!( @@ -752,7 +750,7 @@ async fn update_flow( schema_str, &authed.username, ) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await .map_err(|e| { error::Error::InternalErr(format!( @@ -763,10 +761,10 @@ async fn update_flow( sqlx::query!( "UPDATE flow SET versions = array_append(versions, $1) WHERE path = $2 AND workspace_id = $3", version, nf.path, w_id - ).execute(&mut tx).await?; + ).execute(&mut *tx).await?; if is_new_path { - check_schedule_conflict(tx.transaction_mut(), &w_id, &nf.path).await?; + check_schedule_conflict(&mut tx, &w_id, &nf.path).await?; if !authed.is_admin { require_owner_of_path(&authed, flow_path)?; @@ -778,7 +776,7 @@ async fn update_flow( .bind(&nf.path) .bind(&flow_path) .bind(&w_id) - .fetch_all(&mut tx) + .fetch_all(&mut *tx) .await.map_err(|e| error::Error::InternalErr(format!("Error updating flow due to related schedules update: {e:#}")))?; let schedule = sqlx::query_as::<_, Schedule>( @@ -786,16 +784,16 @@ async fn update_flow( .bind(&nf.path) .bind(&flow_path) .bind(&w_id) - 
.fetch_optional(&mut tx) + .fetch_optional(&mut *tx) .await.map_err(|e| error::Error::InternalErr(format!("Error updating flow due to related schedule update: {e:#}")))?; if let Some(schedule) = schedule { - clear_schedule(tx.transaction_mut(), &flow_path, &w_id).await?; + clear_schedule(&mut tx, &flow_path, &w_id).await?; schedulables.push(schedule); } for schedule in schedulables.into_iter() { - clear_schedule(tx.transaction_mut(), &schedule.path, &w_id).await?; + clear_schedule(&mut tx, &schedule.path, &w_id).await?; if schedule.enabled { tx = push_scheduled_job(&db, tx, &schedule, None).await?; @@ -807,11 +805,11 @@ async fn update_flow( flow_path, &w_id ) - .execute(&mut tx) + .execute(&mut *tx) .await?; audit_log( - &mut tx, + &mut *tx, &authed, "flows.update", ActionKind::Create, @@ -878,7 +876,7 @@ async fn update_flow( nf.path, w_id ) - .execute(&mut new_tx) + .execute(&mut *new_tx) .await .map_err(|e| { error::Error::InternalErr(format!( @@ -890,7 +888,7 @@ async fn update_flow( "UPDATE queue SET canceled = true WHERE id = $1", old_dep_job ) - .execute(&mut new_tx) + .execute(&mut *new_tx) .await .map_err(|e| { error::Error::InternalErr(format!( @@ -1041,7 +1039,6 @@ async fn archive_flow_by_path( Extension(db): Extension, Extension(user_db): Extension, Extension(webhook): Extension, - Extension(rsmq): Extension>, Path((w_id, path)): Path<(String, StripPath)>, Json(archived): Json, ) -> Result { @@ -1088,7 +1085,6 @@ async fn archive_flow_by_path( "unarchived" } )), - rsmq, true, ) .await?; @@ -1105,7 +1101,6 @@ async fn delete_flow_by_path( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Extension(webhook): Extension, Path((w_id, path)): Path<(String, StripPath)>, ) -> Result { @@ -1151,7 +1146,6 @@ async fn delete_flow_by_path( version: 0, // dummy version as it will not get inserted in db }, Some(format!("Flow '{}' deleted", path)), - rsmq, true, ) .await?; diff --git a/backend/windmill-api/src/folders.rs b/backend/windmill-api/src/folders.rs index c9b69836c6a69..a2ece7b55b978 100644 --- a/backend/windmill-api/src/folders.rs +++ b/backend/windmill-api/src/folders.rs @@ -161,7 +161,6 @@ async fn create_folder( Extension(db): Extension, Extension(user_db): Extension, Extension(webhook): Extension, - Extension(rsmq): Extension>, Extension(cache): Extension>, Path(w_id): Path, Json(ng): Json, @@ -223,7 +222,6 @@ async fn create_folder( &w_id, DeployedObject::Folder { path: format!("f/{}", ng.name) }, Some(format!("Folder '{}' created", ng.name)), - rsmq, true, ) .await?; @@ -278,7 +276,6 @@ async fn update_folder( Extension(db): Extension, Extension(user_db): Extension, Extension(webhook): Extension, - Extension(rsmq): Extension>, Path((w_id, name)): Path<(String, String)>, Json(mut ng): Json, ) -> Result { @@ -376,7 +373,6 @@ async fn update_folder( &w_id, DeployedObject::Folder { path: format!("f/{}", name) }, Some(format!("Folder '{}' updated", name)), - rsmq, true, ) .await?; @@ -523,7 +519,6 @@ async fn delete_folder( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Extension(webhook): Extension, Path((w_id, name)): Path<(String, String)>, ) -> Result { @@ -566,7 +561,6 @@ async fn delete_folder( &w_id, DeployedObject::Folder { path: format!("f/{}", name) }, Some(format!("Folder '{}' deleted", name)), - rsmq, true, ) .await?; diff --git a/backend/windmill-api/src/granular_acls.rs b/backend/windmill-api/src/granular_acls.rs index 994d6cd196e95..da2d6655e7300 
100644 --- a/backend/windmill-api/src/granular_acls.rs +++ b/backend/windmill-api/src/granular_acls.rs @@ -55,7 +55,6 @@ async fn add_granular_acl( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path((w_id, path)): Path<(String, StripPath)>, Json(GranularAcl { owner, write }): Json, ) -> Result { @@ -91,10 +90,9 @@ async fn add_granular_acl( if kind == "folder" { if let Some(obj) = sqlx::query_scalar!( "SELECT owners FROM folder WHERE name = $1 AND workspace_id = $2", - path, + path, w_id ) - .fetch_optional(&mut *tx) .await? { @@ -131,8 +129,7 @@ async fn add_granular_acl( &w_id, DeployedObject::Folder { path: format!("f/{}", path) }, Some(format!("Folder '{}' changed permissions", path)), - rsmq, - true, + true, ) .await? } @@ -144,8 +141,7 @@ async fn add_granular_acl( // &w_id, // DeployedObject::App { path: path.to_string(), parent_path: None, version: 0 }, // Some(format!("App '{}' changed permissions", path)), - // rsmq, - // true, + // // true, // ) // .await? // } @@ -161,8 +157,7 @@ async fn add_granular_acl( // hash: ScriptHash(0), // }, // Some(format!("Script '{}' changed permissions", path)), - // rsmq, - // true, + // // true, // ) // .await? // } @@ -174,8 +169,7 @@ async fn add_granular_acl( // &w_id, // DeployedObject::Flow { path: path.to_string(), parent_path: None }, // Some(format!("Flow '{}' changed permissions", path)), - // rsmq, - // true, + // // true, // ) // .await? // } @@ -189,7 +183,6 @@ async fn remove_granular_acl( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path((w_id, path)): Path<(String, StripPath)>, Json(GranularAcl { owner, write: _ }): Json, ) -> Result { @@ -248,8 +241,7 @@ async fn remove_granular_acl( &w_id, DeployedObject::Folder { path: format!("f/{}", path) }, Some(format!("Folder '{}' changed permissions", path)), - rsmq, - true, + true, ) .await? } @@ -261,8 +253,7 @@ async fn remove_granular_acl( // &w_id, // DeployedObject::App { path: path.to_string(), parent_path: None, version: 0 }, // Some(format!("App '{}' changed permissions", path)), - // rsmq, - // true, + // // true, // ) // .await? // } @@ -278,8 +269,7 @@ async fn remove_granular_acl( // hash: ScriptHash(0), // }, // Some(format!("Script '{}' changed permissions", path)), - // rsmq, - // true, + // // true, // ) // .await? // } @@ -291,8 +281,7 @@ async fn remove_granular_acl( // &w_id, // DeployedObject::Flow { path: path.to_string(), parent_path: None }, // Some(format!("Flow '{}' changed permissions", path)), - // rsmq, - // true, + // // true, // ) // .await? 
// } diff --git a/backend/windmill-api/src/groups.rs b/backend/windmill-api/src/groups.rs index b2373c69de4cc..b63d0f9de8ad0 100644 --- a/backend/windmill-api/src/groups.rs +++ b/backend/windmill-api/src/groups.rs @@ -223,7 +223,6 @@ async fn create_group( authed: ApiAuthed, Extension(_db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path(w_id): Path, Json(ng): Json, ) -> Result { @@ -273,7 +272,6 @@ async fn create_group( &w_id, windmill_git_sync::DeployedObject::Group { name: ng.name.clone() }, Some(format!("Created group '{}'", &ng.name)), - rsmq, true, ) .await?; @@ -442,7 +440,6 @@ async fn delete_group( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path((w_id, name)): Path<(String, String)>, ) -> Result { let mut tx = user_db.begin(&authed).await?; @@ -492,7 +489,6 @@ async fn delete_group( &w_id, windmill_git_sync::DeployedObject::Group { name: name.clone() }, Some(format!("Deleted group '{}'", &name)), - rsmq, true, ) .await?; @@ -504,7 +500,6 @@ async fn update_group( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path((w_id, name)): Path<(String, String)>, Json(eg): Json, ) -> Result { @@ -542,7 +537,6 @@ async fn update_group( &w_id, windmill_git_sync::DeployedObject::Group { name: name.clone() }, Some(format!("Updated group '{}'", &name)), - rsmq, true, ) .await?; @@ -554,7 +548,6 @@ async fn add_user( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path((w_id, name)): Path<(String, String)>, Json(Username { username: user_username }): Json, ) -> Result { @@ -593,7 +586,6 @@ async fn add_user( &w_id, windmill_git_sync::DeployedObject::Group { name: name.clone() }, Some(format!("Added user to group '{}'", &name)), - rsmq, true, ) .await?; @@ -712,7 +704,6 @@ async fn remove_user( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path((w_id, name)): Path<(String, String)>, Json(Username { username: user_username }): Json, ) -> Result { @@ -754,7 +745,6 @@ async fn remove_user( &w_id, windmill_git_sync::DeployedObject::Group { name: name.clone() }, Some(format!("Removed user from group '{}'", &name)), - rsmq, true, ) .await?; diff --git a/backend/windmill-api/src/http_triggers.rs b/backend/windmill-api/src/http_triggers.rs index feeb9977a8822..685319e05a4c4 100644 --- a/backend/windmill-api/src/http_triggers.rs +++ b/backend/windmill-api/src/http_triggers.rs @@ -517,7 +517,6 @@ async fn get_http_route_trigger( async fn route_job( Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path(route_path): Path, OptAuthed(opt_authed): OptAuthed, Query(query): Query>, @@ -654,8 +653,7 @@ async fn route_job( authed, db, user_db, - rsmq, - trigger.workspace_id.clone(), + trigger.workspace_id.clone(), StripPath(trigger.script_path.to_owned()), run_query, args, @@ -669,8 +667,7 @@ async fn route_job( run_query, StripPath(trigger.script_path.to_owned()), authed, - rsmq, - user_db, + user_db, args, trigger.workspace_id.clone(), label_prefix, @@ -684,8 +681,7 @@ async fn route_job( authed, db, user_db, - rsmq, - trigger.workspace_id.clone(), + trigger.workspace_id.clone(), StripPath(trigger.script_path.to_owned()), run_query, args, @@ -699,8 +695,7 @@ async fn route_job( run_query, StripPath(trigger.script_path.to_owned()), authed, - rsmq, - user_db, + user_db, trigger.workspace_id.clone(), args, 
label_prefix, diff --git a/backend/windmill-api/src/jobs.rs b/backend/windmill-api/src/jobs.rs index 3aa1e48f33282..0197870c2e98d 100644 --- a/backend/windmill-api/src/jobs.rs +++ b/backend/windmill-api/src/jobs.rs @@ -347,7 +347,6 @@ async fn get_db_clock(Extension(db): Extension) -> windmill_common::error::J } async fn cancel_job_api( - Extension(rsmq): Extension>, OptAuthed(opt_authed): OptAuthed, Extension(db): Extension, Path((w_id, id)): Path<(String, Uuid)>, @@ -373,7 +372,6 @@ async fn cancel_job_api( &w_id, tx, &db, - rsmq, false, opt_authed.is_none(), ), @@ -412,7 +410,6 @@ async fn cancel_job_api( } async fn cancel_persistent_script_api( - Extension(rsmq): Extension>, OptAuthed(opt_authed): OptAuthed, Extension(db): Extension, Path((w_id, script_path)): Path<(String, StripPath)>, @@ -433,7 +430,6 @@ async fn cancel_persistent_script_api( script_path.to_path(), &w_id, &db, - rsmq, ) .await?; @@ -462,7 +458,6 @@ async fn cancel_persistent_script_api( } async fn force_cancel( - Extension(rsmq): Extension>, OptAuthed(opt_authed): OptAuthed, Extension(db): Extension, Path((w_id, id)): Path<(String, Uuid)>, @@ -488,7 +483,6 @@ async fn force_cancel( &w_id, tx, &db, - rsmq, true, opt_authed.is_none(), ), @@ -1375,7 +1369,6 @@ async fn cancel_jobs( db: &DB, username: &str, w_id: &str, - rsmq: Option, ) -> error::JsonResult> { let mut uuids = vec![]; let mut tx = db.begin().await?; @@ -1465,7 +1458,6 @@ async fn cancel_jobs( if trivial_jobs.contains(&job_id) { continue; } - let rsmq = rsmq.clone(); match tokio::time::timeout(tokio::time::Duration::from_secs(5), async move { let tx = db.begin().await?; let (tx, _) = windmill_queue::cancel_job( @@ -1475,7 +1467,6 @@ async fn cancel_jobs( w_id, tx, db, - rsmq, false, false, ) @@ -1511,7 +1502,6 @@ async fn cancel_selection( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path(w_id): Path, Json(jobs): Json>, @@ -1525,14 +1515,7 @@ async fn cancel_selection( .await?; tx.commit().await?; - cancel_jobs( - jobs_to_cancel, - &db, - authed.username.as_str(), - w_id.as_str(), - rsmq, - ) - .await + cancel_jobs(jobs_to_cancel, &db, authed.username.as_str(), w_id.as_str()).await } async fn list_filtered_uuids( @@ -2867,22 +2850,17 @@ pub async fn run_flow_by_path( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path((w_id, flow_path)): Path<(String, StripPath)>, Query(run_query): Query, args: PushArgsOwned, ) -> error::Result<(StatusCode, String)> { - run_flow_by_path_inner( - authed, db, user_db, rsmq, w_id, flow_path, run_query, args, None, - ) - .await + run_flow_by_path_inner(authed, db, user_db, w_id, flow_path, run_query, args, None).await } pub async fn run_flow_by_path_inner( authed: ApiAuthed, db: DB, user_db: UserDB, - rsmq: Option, w_id: String, flow_path: StripPath, run_query: RunJobQuery, @@ -2916,7 +2894,7 @@ pub async fn run_flow_by_path_inner( check_tag_available_for_workspace(&w_id, &tag).await?; let scheduled_for = run_query.get_scheduled_for(&db).await?; - let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into(), rsmq); + let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into()); let (uuid, tx) = push( &db, tx, @@ -2958,7 +2936,6 @@ pub async fn restart_flow( _authed: ApiAuthed, Extension(_db): Extension, Extension(_user_db): Extension, - Extension(_rsmq): Extension>, Path((_w_id, _job_id, _step_id, _branch_or_iteration_n)): Path<( String, Uuid, @@ -2977,7 +2954,6 @@ pub async fn 
restart_flow( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path((w_id, job_id, step_id, branch_or_iteration_n)): Path<( String, Uuid, @@ -3011,7 +2987,7 @@ pub async fn restart_flow( let scheduled_for = run_query.get_scheduled_for(&db).await?; - let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into(), rsmq); + let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into()); let (uuid, tx) = push( &db, @@ -3050,7 +3026,6 @@ pub async fn run_script_by_path( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path((w_id, script_path)): Path<(String, StripPath)>, Query(run_query): Query, args: PushArgsOwned, @@ -3059,7 +3034,6 @@ pub async fn run_script_by_path( authed, db, user_db, - rsmq, w_id, script_path, run_query, @@ -3073,7 +3047,6 @@ pub async fn run_script_by_path_inner( authed: ApiAuthed, db: DB, user_db: UserDB, - rsmq: Option, w_id: String, script_path: StripPath, run_query: RunJobQuery, @@ -3094,7 +3067,7 @@ pub async fn run_script_by_path_inner( let tag = run_query.tag.clone().or(tag); check_tag_available_for_workspace(&w_id, &tag).await?; - let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into(), rsmq); + let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into()); let (uuid, tx) = push( &db, @@ -3136,7 +3109,6 @@ pub async fn run_workflow_as_code( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path((w_id, job_id, entrypoint)): Path<(String, Uuid, String)>, Query(run_query): Query, Query(wkflow_query): Query, @@ -3212,7 +3184,7 @@ pub async fn run_workflow_as_code( i += 1; } - let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into(), rsmq); + let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into()); if *CLOUD_HOSTED { tracing::info!("workflow_as_code_tracing id {i} "); @@ -3257,7 +3229,7 @@ pub async fn run_workflow_as_code( job_id, w_id, entrypoint - ).execute(&mut tx).await?; + ).execute(&mut *tx).await?; } else { tracing::info!("Skipping update of flow status for job {job_id} in workspace {w_id}"); } @@ -3303,7 +3275,6 @@ impl Drop for Guard { &w_id, tx, &db, - None, false, false, ) @@ -3601,7 +3572,6 @@ async fn log_job_view( pub async fn run_wait_result_job_by_path_get( method: hyper::http::Method, authed: ApiAuthed, - Extension(rsmq): Extension>, Extension(user_db): Extension, Extension(db): Extension, Path((w_id, script_path)): Path<(String, StripPath)>, @@ -3640,7 +3610,7 @@ pub async fn run_wait_result_job_by_path_get( let tag = run_query.tag.clone().or(tag); check_tag_available_for_workspace(&w_id, &tag).await?; - let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into(), rsmq); + let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into()); let (uuid, tx) = push( &db, @@ -3679,7 +3649,6 @@ pub async fn run_wait_result_job_by_path_get( pub async fn run_wait_result_flow_by_path_get( method: hyper::http::Method, authed: ApiAuthed, - Extension(rsmq): Extension>, Extension(user_db): Extension, Extension(db): Extension, Path((w_id, flow_path)): Path<(String, StripPath)>, @@ -3711,7 +3680,7 @@ pub async fn run_wait_result_flow_by_path_get( let args = PushArgsOwned { extra: Some(payload_args), args: HashMap::new() }; run_wait_result_flow_by_path_internal( - db, run_query, flow_path, authed, rsmq, user_db, args, w_id, None, + db, run_query, flow_path, authed, user_db, args, w_id, None, ) .await } @@ -3719,7 +3688,6 @@ 
pub async fn run_wait_result_flow_by_path_get( pub async fn run_wait_result_script_by_path( authed: ApiAuthed, Extension(user_db): Extension, - Extension(rsmq): Extension>, Extension(db): Extension, Path((w_id, script_path)): Path<(String, StripPath)>, Query(run_query): Query, @@ -3733,7 +3701,6 @@ pub async fn run_wait_result_script_by_path( run_query, script_path, authed, - rsmq, user_db, w_id, args, @@ -3747,7 +3714,6 @@ pub async fn run_wait_result_script_by_path_internal( run_query: RunJobQuery, script_path: StripPath, authed: ApiAuthed, - rsmq: Option, user_db: UserDB, w_id: String, args: PushArgsOwned, @@ -3763,7 +3729,7 @@ pub async fn run_wait_result_script_by_path_internal( let tag = run_query.tag.clone().or(tag); check_tag_available_for_workspace(&w_id, &tag).await?; - let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into(), rsmq); + let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into()); let (uuid, tx) = push( &db, @@ -3804,7 +3770,6 @@ pub async fn run_wait_result_script_by_path_internal( pub async fn run_wait_result_script_by_hash( authed: ApiAuthed, Extension(user_db): Extension, - Extension(rsmq): Extension>, Extension(db): Extension, Path((w_id, script_hash)): Path<(String, ScriptHash)>, Query(run_query): Query, @@ -3838,7 +3803,7 @@ pub async fn run_wait_result_script_by_hash( let tag = run_query.tag.clone().or(tag); check_tag_available_for_workspace(&w_id, &tag).await?; - let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into(), rsmq); + let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into()); let (uuid, tx) = push( &db, @@ -3889,7 +3854,6 @@ pub async fn run_wait_result_script_by_hash( pub async fn run_wait_result_flow_by_path( authed: ApiAuthed, Extension(user_db): Extension, - Extension(rsmq): Extension>, Extension(db): Extension, Path((w_id, flow_path)): Path<(String, StripPath)>, Query(run_query): Query, @@ -3899,7 +3863,7 @@ pub async fn run_wait_result_flow_by_path( check_license_key_valid().await?; run_wait_result_flow_by_path_internal( - db, run_query, flow_path, authed, rsmq, user_db, args, w_id, None, + db, run_query, flow_path, authed, user_db, args, w_id, None, ) .await } @@ -3909,7 +3873,6 @@ pub async fn run_wait_result_flow_by_path_internal( run_query: RunJobQuery, flow_path: StripPath, authed: ApiAuthed, - rsmq: Option, user_db: UserDB, args: PushArgsOwned, w_id: String, @@ -3943,7 +3906,7 @@ pub async fn run_wait_result_flow_by_path_internal( let tag = run_query.tag.clone().or(tag); check_tag_available_for_workspace(&w_id, &tag).await?; - let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into(), rsmq); + let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into()); let (uuid, tx) = push( &db, @@ -3986,7 +3949,6 @@ async fn run_preview_script( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path(w_id): Path, Query(run_query): Query, Json(preview): Json, @@ -4003,7 +3965,7 @@ async fn run_preview_script( let scheduled_for = run_query.get_scheduled_for(&db).await?; let tag = run_query.tag.clone().or(preview.tag.clone()); check_tag_available_for_workspace(&w_id, &tag).await?; - let tx = PushIsolationLevel::Isolated(user_db.clone(), authed.clone().into(), rsmq); + let tx = PushIsolationLevel::Isolated(user_db.clone(), authed.clone().into()); let (uuid, tx) = push( &db, @@ -4055,7 +4017,6 @@ async fn run_bundle_preview_script( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): 
Extension>, Path(w_id): Path, Query(run_query): Query, mut multipart: axum::extract::Multipart, @@ -4086,8 +4047,7 @@ async fn run_bundle_preview_script( let scheduled_for = run_query.get_scheduled_for(&db).await?; let tag = run_query.tag.clone().or(preview.tag.clone()); check_tag_available_for_workspace(&w_id, &tag).await?; - let ltx = - PushIsolationLevel::Isolated(user_db.clone(), authed.clone().into(), rsmq.clone()); + let ltx = PushIsolationLevel::Isolated(user_db.clone(), authed.clone().into()); let args = preview.args.unwrap_or_default(); @@ -4221,7 +4181,6 @@ pub struct RunDependenciesResponse { async fn run_dependencies_job( authed: ApiAuthed, Extension(db): Extension, - Extension(rsmq): Extension>, Path(w_id): Path, Json(req): Json, ) -> error::Result { @@ -4262,7 +4221,7 @@ async fn run_dependencies_job( let (uuid, tx) = push( &db, - PushIsolationLevel::IsolatedRoot(db.clone(), rsmq), + PushIsolationLevel::IsolatedRoot(db.clone()), &w_id, JobPayload::RawScriptDependencies { script_path: script_path, @@ -4309,7 +4268,6 @@ pub struct RunFlowDependenciesResponse { async fn run_flow_dependencies_job( authed: ApiAuthed, Extension(db): Extension, - Extension(rsmq): Extension>, Path(w_id): Path, Json(req): Json, ) -> error::Result { @@ -4321,7 +4279,7 @@ async fn run_flow_dependencies_job( let (uuid, tx) = push( &db, - PushIsolationLevel::IsolatedRoot(db.clone(), rsmq), + PushIsolationLevel::IsolatedRoot(db.clone()), &w_id, JobPayload::RawFlowDependencies { path: req.path, flow_value: req.flow_value }, PushArgs::from(&HashMap::from([( @@ -4372,7 +4330,6 @@ struct BatchInfo { async fn add_batch_jobs( authed: ApiAuthed, Extension(db): Extension, - Extension(_rsmq): Extension>, Path((w_id, n)): Path<(String, i32)>, Json(batch_info): Json, ) -> error::JsonResult> { @@ -4593,7 +4550,6 @@ async fn run_preview_flow_job( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path(w_id): Path, Query(run_query): Query, Json(raw_flow): Json, @@ -4607,7 +4563,7 @@ async fn run_preview_flow_job( let scheduled_for = run_query.get_scheduled_for(&db).await?; let tag = run_query.tag.clone().or(raw_flow.tag.clone()); check_tag_available_for_workspace(&w_id, &tag).await?; - let tx = PushIsolationLevel::Isolated(user_db.clone(), authed.clone().into(), rsmq); + let tx = PushIsolationLevel::Isolated(user_db.clone(), authed.clone().into()); let (uuid, tx) = push( &db, @@ -4648,7 +4604,6 @@ pub async fn run_job_by_hash( Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path((w_id, script_hash)): Path<(String, ScriptHash)>, Query(run_query): Query, args: PushArgsOwned, @@ -4657,7 +4612,6 @@ pub async fn run_job_by_hash( authed, db, user_db, - rsmq, w_id, script_hash, run_query, @@ -4671,7 +4625,6 @@ pub async fn run_job_by_hash_inner( authed: ApiAuthed, db: DB, user_db: UserDB, - rsmq: Option, w_id: String, script_hash: ScriptHash, run_query: RunJobQuery, @@ -4704,7 +4657,7 @@ pub async fn run_job_by_hash_inner( let tag = run_query.tag.clone().or(tag); check_tag_available_for_workspace(&w_id, &tag).await?; - let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into(), rsmq); + let tx = PushIsolationLevel::Isolated(user_db, authed.clone().into()); let (uuid, tx) = push( &db, diff --git a/backend/windmill-api/src/kafka_triggers_ee.rs b/backend/windmill-api/src/kafka_triggers_ee.rs index 0b3db94c4a606..3a03ebe864ed5 100644 --- a/backend/windmill-api/src/kafka_triggers_ee.rs +++ 
b/backend/windmill-api/src/kafka_triggers_ee.rs @@ -7,7 +7,6 @@ pub fn workspaced_service() -> Router { pub async fn start_kafka_consumers( _db: DB, - _rsmq: Option, mut _killpill_rx: tokio::sync::broadcast::Receiver<()>, ) -> () { // implementation is not open source diff --git a/backend/windmill-api/src/lib.rs b/backend/windmill-api/src/lib.rs index ba1ab8df59bed..28f2523ad5009 100644 --- a/backend/windmill-api/src/lib.rs +++ b/backend/windmill-api/src/lib.rs @@ -38,12 +38,13 @@ use tower_http::{ trace::TraceLayer, }; use windmill_common::db::UserDB; -use windmill_common::worker::{ALL_TAGS, CLOUD_HOSTED}; -use windmill_common::{BASE_URL, INSTANCE_NAME, utils::GIT_VERSION}; +use windmill_common::worker::CLOUD_HOSTED; +use windmill_common::{utils::GIT_VERSION, BASE_URL, INSTANCE_NAME}; use crate::scim_ee::has_scim_token; use windmill_common::error::AppError; +mod ai; mod apps; mod audit; mod capture; @@ -62,7 +63,6 @@ mod http_triggers; mod indexer_ee; mod inputs; mod integration; -mod ai; #[cfg(feature = "parquet")] mod job_helpers_ee; @@ -97,7 +97,6 @@ mod workspaces_ee; pub const DEFAULT_BODY_LIMIT: usize = 2097152 * 100; // 200MB - lazy_static::lazy_static! { pub static ref REQUEST_SIZE_LIMIT: Arc> = Arc::new(RwLock::new(DEFAULT_BODY_LIMIT)); @@ -163,7 +162,6 @@ type ServiceLogIndexReader = windmill_indexer::service_logs_ee::ServiceLogIndexR pub async fn run_server( db: DB, - rsmq: Option, job_index_reader: Option, log_index_reader: Option, addr: SocketAddr, @@ -172,17 +170,6 @@ pub async fn run_server( server_mode: bool, base_internal_url: String, ) -> anyhow::Result<()> { - if let Some(mut rsmq) = rsmq.clone() { - for tag in ALL_TAGS.read().await.iter() { - let r = - rsmq_async::RsmqConnection::create_queue(&mut rsmq, &tag, None, None, None).await; - if let Err(e) = r { - tracing::info!("Redis queue {tag} could not be created: {e:#}"); - } else { - tracing::info!("Redis queue {tag} created"); - } - } - } let user_db = UserDB::new(db.clone()); #[cfg(feature = "enterprise")] @@ -202,7 +189,6 @@ pub async fn run_server( let middleware_stack = ServiceBuilder::new() .layer(Extension(db.clone())) - .layer(Extension(rsmq.clone())) .layer(Extension(user_db.clone())) .layer(Extension(auth_cache.clone())) .layer(Extension(job_index_reader)) @@ -229,7 +215,6 @@ pub async fn run_server( db: db.clone(), user_db: user_db, auth_cache: auth_cache.clone(), - rsmq: rsmq.clone(), base_internal_url: base_internal_url.clone(), }); if let Err(err) = smtp_server.start_listener_thread(addr).await { @@ -266,12 +251,12 @@ pub async fn run_server( if !*CLOUD_HOSTED { let ws_killpill_rx = rx.resubscribe(); - websocket_triggers::start_websockets(db.clone(), rsmq.clone(), ws_killpill_rx).await; + websocket_triggers::start_websockets(db.clone(), ws_killpill_rx).await; #[cfg(all(feature = "enterprise", feature = "kafka"))] { let kafka_killpill_rx = rx.resubscribe(); - kafka_triggers_ee::start_kafka_consumers(db.clone(), rsmq, kafka_killpill_rx).await; + kafka_triggers_ee::start_kafka_consumers(db.clone(), kafka_killpill_rx).await; } } diff --git a/backend/windmill-api/src/resources.rs b/backend/windmill-api/src/resources.rs index 543a978fac89c..88f8d79c29e46 100644 --- a/backend/windmill-api/src/resources.rs +++ b/backend/windmill-api/src/resources.rs @@ -646,7 +646,6 @@ async fn create_resource( Extension(db): Extension, Extension(user_db): Extension, Extension(webhook): Extension, - Extension(rsmq): Extension>, Path(w_id): Path, Query(q): Query, Json(resource): Json, @@ -696,7 +695,6 @@ async fn create_resource( 
&w_id, DeployedObject::Resource { path: resource.path.clone(), parent_path: None }, Some(format!("Resource '{}' created", resource.path.clone())), - rsmq.clone(), true, ) .await?; @@ -717,7 +715,6 @@ async fn delete_resource( Extension(db): Extension, Extension(user_db): Extension, Extension(webhook): Extension, - Extension(rsmq): Extension>, Path((w_id, path)): Path<(String, StripPath)>, ) -> Result { let path = path.to_path(); @@ -756,7 +753,6 @@ async fn delete_resource( &w_id, DeployedObject::Resource { path: path.to_string(), parent_path: Some(path.to_string()) }, Some(format!("Resource '{}' deleted", path)), - rsmq.clone(), true, ) .await?; @@ -774,7 +770,6 @@ async fn update_resource( Extension(db): Extension, Extension(user_db): Extension, Extension(webhook): Extension, - Extension(rsmq): Extension>, Path((w_id, path)): Path<(String, StripPath)>, Json(ns): Json, ) -> Result { @@ -844,7 +839,6 @@ async fn update_resource( &w_id, DeployedObject::Resource { path: npath.to_string(), parent_path: Some(path.to_string()) }, Some(format!("Resource '{}' updated", npath)), - rsmq.clone(), true, ) .await?; @@ -871,7 +865,6 @@ async fn update_resource_value( Extension(db): Extension, Extension(user_db): Extension, Extension(webhook): Extension, - Extension(rsmq): Extension>, Path((w_id, path)): Path<(String, StripPath)>, Json(nv): Json, ) -> Result { @@ -905,7 +898,6 @@ async fn update_resource_value( &w_id, DeployedObject::Resource { path: path.to_string(), parent_path: Some(path.to_string()) }, None, - rsmq.clone(), true, ) .await?; @@ -1024,7 +1016,6 @@ async fn create_resource_type( Extension(db): Extension, Extension(user_db): Extension, Extension(webhook): Extension, - Extension(rsmq): Extension>, Path(w_id): Path, Json(resource_type): Json, ) -> Result<(StatusCode, String)> { @@ -1056,7 +1047,6 @@ async fn create_resource_type( "Resource Type '{}' created", resource_type.name.clone() )), - rsmq.clone(), true, ) .await?; @@ -1111,7 +1101,6 @@ async fn delete_resource_type( Extension(db): Extension, Extension(user_db): Extension, Extension(webhook): Extension, - Extension(rsmq): Extension>, Path((w_id, name)): Path<(String, String)>, ) -> Result { require_admin(authed.is_admin, &authed.username)?; @@ -1144,7 +1133,6 @@ async fn delete_resource_type( &w_id, DeployedObject::ResourceType { path: name.clone() }, None, - rsmq.clone(), true, ) .await?; @@ -1162,7 +1150,6 @@ async fn update_resource_type( Extension(db): Extension, Extension(user_db): Extension, Extension(webhook): Extension, - Extension(rsmq): Extension>, Path((w_id, name)): Path<(String, String)>, Json(ns): Json, ) -> Result { @@ -1201,7 +1188,6 @@ async fn update_resource_type( &w_id, DeployedObject::ResourceType { path: name.clone() }, None, - rsmq.clone(), true, ) .await?; diff --git a/backend/windmill-api/src/schedule.rs b/backend/windmill-api/src/schedule.rs index fdd1a48738cf8..336bc784dc898 100644 --- a/backend/windmill-api/src/schedule.rs +++ b/backend/windmill-api/src/schedule.rs @@ -28,10 +28,10 @@ use windmill_common::{ db::UserDB, error::{Error, JsonResult, Result}, schedule::Schedule, - utils::{not_found_if_none, paginate, Pagination, StripPath, ScheduleType}, + utils::{not_found_if_none, paginate, Pagination, ScheduleType, StripPath}, }; use windmill_git_sync::{handle_deployment_metadata, DeployedObject}; -use windmill_queue::{schedule::push_scheduled_job, QueueTransaction}; +use windmill_queue::schedule::push_scheduled_job; pub fn workspaced_service() -> Router { Router::new() @@ -124,7 +124,6 @@ async fn 
create_schedule( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path(w_id): Path, Json(ns): Json, ) -> Result { @@ -152,20 +151,13 @@ async fn create_schedule( )); } - let mut tx: QueueTransaction<'_, _> = (rsmq.clone(), user_db.begin(&authed).await?).into(); + let mut tx: Transaction<'_, Postgres> = user_db.begin(&authed).await?; // Check schedule for error ScheduleType::from_str(&ns.schedule, ns.cron_version.as_deref())?; - check_path_conflict(tx.transaction_mut(), &w_id, &ns.path).await?; - check_flow_conflict( - tx.transaction_mut(), - &w_id, - &ns.path, - ns.is_flow, - &ns.script_path, - ) - .await?; + check_path_conflict(&mut tx, &w_id, &ns.path).await?; + check_flow_conflict(&mut tx, &w_id, &ns.path, ns.is_flow, &ns.script_path).await?; let schedule = sqlx::query_as::<_, Schedule>( "INSERT INTO schedule (workspace_id, path, schedule, timezone, edited_by, script_path, \ @@ -202,7 +194,7 @@ async fn create_schedule( .bind(&ns.tag) .bind(&ns.paused_until) .bind(&ns.cron_version.unwrap_or("v2".to_string())) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await .map_err(|e| Error::InternalErr(format!("inserting schedule in {w_id}: {e:#}")))?; @@ -213,13 +205,12 @@ async fn create_schedule( &w_id, DeployedObject::Schedule { path: ns.path.clone() }, Some(format!("Schedule '{}' created", ns.path.clone())), - rsmq.clone(), true, ) .await?; audit_log( - &mut tx, + &mut *tx, &authed, "schedule.create", ActionKind::Create, @@ -249,20 +240,18 @@ async fn edit_schedule( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path((w_id, path)): Path<(String, StripPath)>, Json(es): Json, ) -> Result { let path = path.to_path(); let authed = maybe_refresh_folders(&path, &w_id, authed, &db).await; - let mut tx: QueueTransaction<'_, rsmq_async::MultiplexedRsmq> = - (rsmq.clone(), user_db.begin(&authed).await?).into(); + let mut tx = user_db.begin(&authed).await?; // Check schedule for error ScheduleType::from_str(&es.schedule, es.cron_version.as_deref())?; - clear_schedule(tx.transaction_mut(), path, &w_id).await?; + clear_schedule(&mut tx, path, &w_id).await?; let schedule = sqlx::query_as::<_, Schedule>( "UPDATE schedule SET schedule = $1, timezone = $2, args = $3, on_failure = $4, on_failure_times = $5, \ on_failure_exact = $6, on_failure_extra_args = $7, on_recovery = $8, on_recovery_times = $9, \ @@ -291,7 +280,7 @@ async fn edit_schedule( .bind(&path) .bind(&w_id) .bind(&es.cron_version) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await .map_err(|e| Error::InternalErr(format!("updating schedule in {w_id}: {e:#}")))?; @@ -302,13 +291,12 @@ async fn edit_schedule( &w_id, DeployedObject::Schedule { path: path.to_string() }, None, - rsmq.clone(), true, ) .await?; audit_log( - &mut tx, + &mut *tx, &authed, "schedule.edit", ActionKind::Update, @@ -407,7 +395,7 @@ pub struct ScheduleWJobs { pub no_flow_overlap: bool, pub tag: Option, pub paused_until: Option>, - pub cron_version: Option + pub cron_version: Option, } async fn list_schedule_with_jobs( @@ -470,17 +458,16 @@ async fn exists_schedule( pub struct PreviewPayload { pub schedule: String, pub timezone: String, - pub cron_version: Option + pub cron_version: Option, } pub async fn preview_schedule( Json(payload): Json, ) -> JsonResult>> { - let schedule = ScheduleType::from_str(&payload.schedule, payload.cron_version.as_deref())?; - let tz = chrono_tz::Tz::from_str(&payload.timezone) - .map_err(|e| Error::BadRequest(e.to_string()))?; + 
let tz = + chrono_tz::Tz::from_str(&payload.timezone).map_err(|e| Error::BadRequest(e.to_string()))?; let upcoming: Vec> = schedule.upcoming(tz, 5)?; @@ -491,12 +478,10 @@ pub async fn set_enabled( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path((w_id, path)): Path<(String, StripPath)>, Json(payload): Json, ) -> Result { - let mut tx: QueueTransaction<'_, rsmq_async::MultiplexedRsmq> = - (rsmq.clone(), user_db.begin(&authed).await?).into(); + let mut tx = user_db.begin(&authed).await?; let path = path.to_path(); let schedule_o = sqlx::query_as::<_, Schedule>( "UPDATE schedule SET enabled = $1, email = $2 WHERE path = $3 AND workspace_id = $4 RETURNING *") @@ -504,12 +489,12 @@ pub async fn set_enabled( .bind(&authed.email) .bind(&path) .bind(&w_id) - .fetch_optional(&mut tx) + .fetch_optional(&mut *tx) .await?; let schedule = not_found_if_none(schedule_o, "Schedule", path)?; - clear_schedule(tx.transaction_mut(), path, &w_id).await?; + clear_schedule(&mut tx, path, &w_id).await?; handle_deployment_metadata( &authed.email, @@ -518,13 +503,12 @@ pub async fn set_enabled( &w_id, DeployedObject::Schedule { path: path.to_string() }, None, - rsmq.clone(), true, ) .await?; audit_log( - &mut tx, + &mut *tx, &authed, "schedule.setenabled", ActionKind::Update, @@ -549,12 +533,11 @@ pub async fn set_enabled( // authed: ApiAuthed, // Extension(db): Extension, // Extension(user_db): Extension, -// Extension(rsmq): Extension>, -// Path((w_id, path)): Path<(String, StripPath)>, +// // Path((w_id, path)): Path<(String, StripPath)>, // Json(payload): Json, // ) -> Result { // let mut tx: QueueTransaction<'_, rsmq_async::MultiplexedRsmq> = -// (rsmq, user_db.begin(&authed).await?).into(); +// (user_db.begin(&authed).await?).into(); // let path = path.to_path(); // let schedule_o = sqlx::query_as!( // Schedule, @@ -564,15 +547,15 @@ pub async fn set_enabled( // path, // w_id // ) -// .fetch_optional(&mut tx) +// .fetch_optional(&mut *tx) // .await?; // let schedule = not_found_if_none(schedule_o, "Schedule", path)?; -// clear_schedule(tx.transaction_mut(), path, &w_id).await?; +// clear_schedule(&mut tx, path, &w_id).await?; // audit_log( -// &mut tx, +// &mut *tx, // &authed, // "schedule.setenabled", // ActionKind::Update, @@ -597,7 +580,6 @@ async fn delete_schedule( authed: ApiAuthed, Extension(db): Extension, Extension(user_db): Extension, - Extension(rsmq): Extension>, Path((w_id, path)): Path<(String, StripPath)>, ) -> Result { let mut tx = user_db.begin(&authed).await?; @@ -643,7 +625,6 @@ async fn delete_schedule( &w_id, DeployedObject::Schedule { path: path.to_string() }, Some(format!("Schedule '{}' deleted", path)), - rsmq.clone(), true, ) .await?; @@ -667,7 +648,6 @@ async fn delete_schedule( async fn set_default_error_handler( authed: ApiAuthed, Extension(db): Extension, - Extension(rsmq): Extension>, Path(w_id): Path, Json(payload): Json, ) -> Result<()> { @@ -794,7 +774,6 @@ async fn set_default_error_handler( &w_id, DeployedObject::Schedule { path: updated_schedule_path }, None, - rsmq.clone(), true, ) .await?; diff --git a/backend/windmill-api/src/scripts.rs b/backend/windmill-api/src/scripts.rs index 56b991713f33d..2f8c54232b19a 100644 --- a/backend/windmill-api/src/scripts.rs +++ b/backend/windmill-api/src/scripts.rs @@ -61,7 +61,7 @@ use windmill_common::{ }; use windmill_git_sync::{handle_deployment_metadata, DeployedObject}; use windmill_parser_ts::remove_pinned_imports; -use windmill_queue::{schedule::push_scheduled_job, 
PushIsolationLevel, QueueTransaction}; +use windmill_queue::{schedule::push_scheduled_job, PushIsolationLevel}; const MAX_HASH_HISTORY_LENGTH_STORED: usize = 20; @@ -352,7 +352,6 @@ async fn create_snapshot_script() -> Result<(StatusCode, String)> { async fn create_snapshot_script( authed: ApiAuthed, Extension(user_db): Extension, - Extension(rsmq): Extension>, Extension(webhook): Extension, Extension(db): Extension, Path(w_id): Path, @@ -374,7 +373,6 @@ async fn create_snapshot_script( w_id.clone(), authed.clone(), db.clone(), - rsmq.clone(), user_db.clone(), webhook.clone(), ) @@ -426,13 +424,12 @@ async fn create_snapshot_script( async fn create_script( authed: ApiAuthed, Extension(user_db): Extension, - Extension(rsmq): Extension>, Extension(webhook): Extension, Extension(db): Extension, Path(w_id): Path, Json(ns): Json, ) -> Result<(StatusCode, String)> { - let (hash, tx) = create_script_internal(ns, w_id, authed, db, rsmq, user_db, webhook).await?; + let (hash, tx) = create_script_internal(ns, w_id, authed, db, user_db, webhook).await?; tx.commit().await?; Ok((StatusCode::CREATED, format!("{}", hash))) } @@ -442,13 +439,9 @@ async fn create_script_internal<'c>( w_id: String, authed: ApiAuthed, db: sqlx::Pool, - rsmq: Option, user_db: UserDB, webhook: WebhookShared, -) -> Result<( - ScriptHash, - QueueTransaction<'c, rsmq_async::MultiplexedRsmq>, -)> { +) -> Result<(ScriptHash, Transaction<'c, Postgres>)> { let codebase = ns.codebase.as_ref(); #[cfg(not(feature = "enterprise"))] if ns.ws_error_handler_muted.is_some_and(|val| val) { @@ -460,13 +453,13 @@ async fn create_script_internal<'c>( let script_path = ns.path.clone(); let hash = ScriptHash(hash_script(&ns)); let authed = maybe_refresh_folders(&ns.path, &w_id, authed, &db).await; - let mut tx: QueueTransaction<'_, _> = (rsmq.clone(), user_db.begin(&authed).await?).into(); + let mut tx: Transaction<'_, Postgres> = user_db.begin(&authed).await?; if sqlx::query_scalar!( "SELECT 1 FROM script WHERE hash = $1 AND workspace_id = $2", hash.0, &w_id ) - .fetch_optional(&mut tx) + .fetch_optional(&mut *tx) .await? .is_some() { @@ -481,7 +474,7 @@ async fn create_script_internal<'c>( ) .bind(&ns.path) .bind(&w_id) - .fetch_optional(&mut tx) + .fetch_optional(&mut *tx) .await?; struct ParentInfo { p_hashes: Vec, @@ -500,7 +493,7 @@ async fn create_script_internal<'c>( s.hash.0, &w_id ) - .execute(&mut tx) + .execute(&mut *tx) .await?; Ok(None) } @@ -510,7 +503,7 @@ async fn create_script_internal<'c>( p_hash.0, &w_id ) - .fetch_optional(&mut tx) + .fetch_optional(&mut *tx) .await? .is_none() { @@ -524,7 +517,7 @@ async fn create_script_internal<'c>( p_hash.0, &w_id ) - .fetch_optional(&mut tx) + .fetch_optional(&mut *tx) .await?; if let Some(clashing_hash) = clashing_hash_o { @@ -536,7 +529,7 @@ async fn create_script_internal<'c>( }; let ScriptWithStarred { script: ps, .. 
} = - get_script_by_hash_internal(tx.transaction_mut(), &w_id, p_hash, None).await?; + get_script_by_hash_internal(&mut tx, &w_id, p_hash, None).await?; if ps.path != ns.path { require_owner_of_path(&authed, &ps.path)?; @@ -571,7 +564,7 @@ async fn create_script_internal<'c>( p_hash.0, &w_id ) - .execute(&mut tx) + .execute(&mut *tx) .await?; r } @@ -655,7 +648,7 @@ async fn create_script_internal<'c>( codebase, ns.has_preprocessor, ) - .execute(&mut tx) + .execute(&mut *tx) .await?; let p_path_opt = parent_hashes_and_perms.as_ref().map(|x| x.p_path.clone()); if let Some(ref p_path) = p_path_opt { @@ -664,7 +657,7 @@ async fn create_script_internal<'c>( p_path, &w_id ) - .execute(&mut tx) + .execute(&mut *tx) .await?; let mut schedulables = sqlx::query_as::<_, Schedule>( @@ -672,7 +665,7 @@ async fn create_script_internal<'c>( .bind(&ns.path) .bind(&p_path) .bind(&w_id) - .fetch_all(&mut tx) + .fetch_all(&mut *tx) .await?; let schedule = sqlx::query_as::<_, Schedule>( @@ -680,7 +673,7 @@ async fn create_script_internal<'c>( .bind(&ns.path) .bind(&p_path) .bind(&w_id) - .fetch_optional(&mut tx) + .fetch_optional(&mut *tx) .await?; if let Some(schedule) = schedule { @@ -688,7 +681,7 @@ async fn create_script_internal<'c>( } for schedule in schedulables { - clear_schedule(tx.transaction_mut(), &schedule.path, &w_id).await?; + clear_schedule(&mut tx, &schedule.path, &w_id).await?; if schedule.enabled { tx = push_scheduled_job(&db, tx, &schedule, None).await?; @@ -700,12 +693,12 @@ async fn create_script_internal<'c>( ns.path, &w_id ) - .execute(&mut tx) + .execute(&mut *tx) .await?; } if p_hashes.is_some() && !p_hashes.unwrap().is_empty() { audit_log( - &mut tx, + &mut *tx, &authed, "scripts.update", ActionKind::Update, @@ -724,7 +717,7 @@ async fn create_script_internal<'c>( ); } else { audit_log( - &mut tx, + &mut *tx, &authed, "scripts.create", ActionKind::Create, @@ -811,7 +804,6 @@ async fn create_script_internal<'c>( parent_path: p_path_opt, }, ns.deployment_message, - rsmq, false, ) .await?; @@ -956,7 +948,6 @@ async fn get_latest_version( ) -> JsonResult> { let mut tx = user_db.begin(&authed).await?; let row_o = sqlx::query!( - "SELECT s.hash as hash, dm.deployment_msg as deployment_msg FROM script s LEFT JOIN deployment_metadata dm ON s.hash = dm.script_hash WHERE s.workspace_id = $1 AND s.path = $2 @@ -964,7 +955,6 @@ async fn get_latest_version( w_id, path.to_path(), ) - .fetch_optional(&mut *tx) .await?; tx.commit().await?; @@ -978,7 +968,6 @@ async fn get_latest_version( } else { return Ok(Json(None)); } - } async fn update_script_history( @@ -1295,7 +1284,6 @@ async fn archive_script_by_path( Extension(webhook): Extension, Extension(user_db): Extension, Extension(db): Extension, - Extension(rsmq): Extension>, Path((w_id, path)): Path<(String, StripPath)>, ) -> Result<()> { let path = path.to_path(); @@ -1334,7 +1322,6 @@ async fn archive_script_by_path( parent_path: Some(path.to_string()), }, Some(format!("Script '{}' archived", path)), - rsmq, true, ) .await?; @@ -1428,7 +1415,6 @@ async fn delete_script_by_path( Extension(user_db): Extension, Extension(webhook): Extension, Extension(db): Extension, - Extension(rsmq): Extension>, Path((w_id, path)): Path<(String, StripPath)>, ) -> JsonResult { let path = path.to_path(); @@ -1493,7 +1479,6 @@ async fn delete_script_by_path( parent_path: Some(path.to_string()), }, Some(format!("Script '{}' deleted", path)), - rsmq, true, ) .await?; diff --git a/backend/windmill-api/src/smtp_server_ee.rs b/backend/windmill-api/src/smtp_server_ee.rs 
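One mechanical change repeated through the schedule.rs and scripts.rs hunks above is `.fetch_one(&mut tx)` becoming `.fetch_one(&mut *tx)`: with the QueueTransaction wrapper gone, queries run on a plain sqlx Transaction, and since sqlx 0.7 the Executor impl lives on the reborrowed inner connection rather than on `&mut Transaction`. A minimal sketch of the idiom:

use sqlx::{Pool, Postgres};

async fn demo(db: &Pool<Postgres>) -> sqlx::Result<()> {
    let mut tx = db.begin().await?;
    // Transaction derefs to its inner connection; the `&mut *tx` reborrow
    // is the type that implements Executor in sqlx 0.7+.
    sqlx::query("SELECT 1").execute(&mut *tx).await?;
    tx.commit().await?;
    Ok(())
}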
index 88837c5974102..a680499148b09 100644 --- a/backend/windmill-api/src/smtp_server_ee.rs +++ b/backend/windmill-api/src/smtp_server_ee.rs @@ -6,7 +6,6 @@ pub struct SmtpServer { pub auth_cache: Arc, pub db: DB, pub user_db: UserDB, - pub rsmq: Option, pub base_internal_url: String, } diff --git a/backend/windmill-api/src/users.rs b/backend/windmill-api/src/users.rs index bd3a2d44113fa..3afe303cb9417 100644 --- a/backend/windmill-api/src/users.rs +++ b/backend/windmill-api/src/users.rs @@ -92,7 +92,6 @@ pub fn global_service() -> Router { .route("/setpassword", post(set_password)) .route("/set_password_of/:user", post(set_password_of_user)) .route("/set_login_type/:user", post(set_login_type)) - .route("/create", post(create_user)) .route("/update/:user", post(update_user)) .route("/delete/:user", delete(delete_user)) @@ -876,7 +875,6 @@ pub struct EditLoginType { pub login_type: String, } - #[derive(FromRow, Serialize)] pub struct TruncatedToken { pub label: Option, @@ -1479,7 +1477,7 @@ async fn whois( // ) -> Result<(StatusCode, String)> { // let mut tx = db.begin().await?; -// require_super_admin(&mut tx, email).await?; +// require_super_admin(&mut *tx, email).await?; // sqlx::query!( // "INSERT INTO invite_code @@ -1488,7 +1486,7 @@ async fn whois( // nu.code, // nu.seats // ) -// .execute(&mut tx) +// .execute(&mut *tx) // .await?; // tx.commit().await?; @@ -1550,7 +1548,6 @@ async fn accept_invite( authed: ApiAuthed, Extension(webhook): Extension, Extension(db): Extension, - Extension(rsmq): Extension>, Json(nu): Json, ) -> Result<(StatusCode, String)> { let mut tx = db.begin().await?; @@ -1613,7 +1610,6 @@ async fn accept_invite( &nu.workspace_id, windmill_git_sync::DeployedObject::User { email: authed.email.clone() }, Some(format!("User '{}' accepted invite", &authed.email)), - rsmq, true, ) .await?; @@ -1783,7 +1779,6 @@ async fn get_workspace_user( async fn update_workspace_user( authed: ApiAuthed, Extension(db): Extension, - Extension(rsmq): Extension>, Path((w_id, username_to_update)): Path<(String, String)>, Json(eu): Json, ) -> Result { @@ -1852,7 +1847,6 @@ async fn update_workspace_user( &w_id, windmill_git_sync::DeployedObject::User { email: user_email.clone() }, Some(format!("Updated user '{}'", &user_email)), - rsmq, true, ) .await?; @@ -1972,16 +1966,14 @@ async fn create_user( Extension(db): Extension, Extension(webhook): Extension, Extension(argon2): Extension>>, - Extension(rsmq): Extension>, Json(nu): Json, ) -> Result<(StatusCode, String)> { - crate::users_ee::create_user(authed, db, webhook, argon2, rsmq, nu).await + crate::users_ee::create_user(authed, db, webhook, argon2, nu).await } async fn delete_workspace_user( authed: ApiAuthed, Extension(db): Extension, - Extension(rsmq): Extension>, Path((w_id, username_to_delete)): Path<(String, String)>, ) -> Result { let mut tx = db.begin().await?; @@ -2036,7 +2028,6 @@ async fn delete_workspace_user( "Removed user '{}' from workspace", &email_to_delete )), - rsmq, true, ) .await?; @@ -2094,8 +2085,10 @@ async fn set_login_type( .await?; tx.commit().await?; - Ok(format!("login type of {} updated to {}", email, et.login_type)) - + Ok(format!( + "login type of {} updated to {}", + email, et.login_type + )) } async fn login( diff --git a/backend/windmill-api/src/users_ee.rs b/backend/windmill-api/src/users_ee.rs index 65a2a2ed17cb2..0e20d5bf841cd 100644 --- a/backend/windmill-api/src/users_ee.rs +++ b/backend/windmill-api/src/users_ee.rs @@ -15,7 +15,6 @@ pub async fn create_user( _db: DB, _webhook: WebhookShared, 
_argon2: Arc>, - _rsmq: Option, mut _nu: NewUser, ) -> Result<(StatusCode, String)> { Err(Error::InternalErr( diff --git a/backend/windmill-api/src/variables.rs b/backend/windmill-api/src/variables.rs index 40e74d41eaa11..ae7b8f75d6989 100644 --- a/backend/windmill-api/src/variables.rs +++ b/backend/windmill-api/src/variables.rs @@ -299,7 +299,6 @@ async fn create_variable( Extension(db): Extension, Extension(user_db): Extension, Extension(webhook): Extension, - Extension(rsmq): Extension>, Path(w_id): Path, Query(AlreadyEncrypted { already_encrypted }): Query, Json(variable): Json, @@ -352,7 +351,6 @@ async fn create_variable( &w_id, DeployedObject::Variable { path: variable.path.clone(), parent_path: None }, Some(format!("Variable '{}' created", variable.path.clone())), - rsmq.clone(), true, ) .await?; @@ -384,7 +382,6 @@ async fn delete_variable( Extension(db): Extension, Extension(user_db): Extension, Extension(webhook): Extension, - Extension(rsmq): Extension>, Path((w_id, path)): Path<(String, StripPath)>, ) -> Result { let path = path.to_path(); @@ -424,7 +421,6 @@ async fn delete_variable( &w_id, DeployedObject::Variable { path: path.to_string(), parent_path: Some(path.to_string()) }, Some(format!("Variable '{}' deleted", path)), - rsmq.clone(), true, ) .await?; @@ -455,7 +451,6 @@ async fn update_variable( Extension(db): Extension, Extension(user_db): Extension, Extension(webhook): Extension, - Extension(rsmq): Extension>, Path((w_id, path)): Path<(String, StripPath)>, Query(AlreadyEncrypted { already_encrypted }): Query, Json(ns): Json, @@ -578,7 +573,6 @@ async fn update_variable( &w_id, DeployedObject::Variable { path: npath.clone(), parent_path: Some(path.to_string()) }, None, - rsmq.clone(), true, ) .await?; diff --git a/backend/windmill-api/src/websocket_triggers.rs b/backend/windmill-api/src/websocket_triggers.rs index d0fd2fa8cfa59..93961ae0b3a40 100644 --- a/backend/windmill-api/src/websocket_triggers.rs +++ b/backend/windmill-api/src/websocket_triggers.rs @@ -23,9 +23,7 @@ use windmill_audit::{audit_ee::audit_log, ActionKind}; use windmill_common::{ db::UserDB, error::{self, to_anyhow, JsonResult}, - utils::{ - not_found_if_none, paginate, report_critical_error, Pagination, StripPath, - }, + utils::{not_found_if_none, paginate, report_critical_error, Pagination, StripPath}, worker::{to_raw_value, CLOUD_HOSTED}, INSTANCE_NAME, }; @@ -375,7 +373,6 @@ async fn exists_websocket_trigger( async fn listen_to_unlistened_websockets( db: &DB, - rsmq: &Option, killpill_rx: &tokio::sync::broadcast::Receiver<()>, ) -> () { match sqlx::query_as::<_, WebsocketTrigger>( @@ -389,7 +386,6 @@ async fn listen_to_unlistened_websockets( Ok(mut triggers) => { triggers.shuffle(&mut rand::thread_rng()); for trigger in triggers { - maybe_listen_to_websocket(trigger, db.clone(), rsmq.clone(), killpill_rx.resubscribe()).await; + maybe_listen_to_websocket(trigger, db.clone(), killpill_rx.resubscribe()).await; } } Err(err) => { @@ -398,13 +395,9 @@ async fn listen_to_unlistened_websockets( }; } -pub async fn start_websockets( - db: DB, - rsmq: Option, - mut killpill_rx: tokio::sync::broadcast::Receiver<()>, -) -> () { +pub async fn start_websockets(db: DB, mut killpill_rx: tokio::sync::broadcast::Receiver<()>) -> () { tokio::spawn(async move { - listen_to_unlistened_websockets(&db, &rsmq, &killpill_rx).await; + listen_to_unlistened_websockets(&db, &killpill_rx).await; loop { tokio::select! { biased; @@ -412,7 +405,7 @@ pub async fn start_websockets( return; } _ = tokio::time::sleep(tokio::time::Duration::from_secs(15)) => { - listen_to_unlistened_websockets(&db, &rsmq, &killpill_rx).await; + listen_to_unlistened_websockets(&db, &killpill_rx).await; } } }
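With the Redis handle gone, start_websockets reduces to a plain poll-or-shutdown loop. A self-contained sketch of that loop (scan is an illustrative stand-in for listen_to_unlistened_websockets; same biased select and 15s rescan as the hunk above):

use tokio::sync::broadcast;
use tokio::time::{sleep, Duration};

async fn scan() { /* re-scan triggers against the DB here */ }

fn start(mut killpill_rx: broadcast::Receiver<()>) {
    tokio::spawn(async move {
        scan().await;
        loop {
            tokio::select! {
                // biased: always check the shutdown signal before the timer.
                biased;
                _ = killpill_rx.recv() => return,
                _ = sleep(Duration::from_secs(15)) => scan().await,
            }
        }
    });
}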
@@ -422,7 +415,6 @@ pub async fn start_websockets( async fn maybe_listen_to_websocket( ws_trigger: WebsocketTrigger, db: DB, - rsmq: Option, killpill_rx: tokio::sync::broadcast::Receiver<()>, ) -> () { match sqlx::query_scalar!( @@ -433,7 +425,7 @@ async fn maybe_listen_to_websocket( ).fetch_optional(&db).await { Ok(has_lock) => { if has_lock.flatten().unwrap_or(false) { - tokio::spawn(listen_to_websocket(ws_trigger, db, rsmq, killpill_rx)); + tokio::spawn(listen_to_websocket(ws_trigger, db, killpill_rx)); } else { tracing::info!("Websocket {} already being listened to", ws_trigger.url); } @@ -517,7 +509,6 @@ async fn wait_runnable_result( ws_trigger: &WebsocketTrigger, username_override: String, db: &DB, - rsmq: Option, ) -> error::Result { let user_db = UserDB::new(db.clone()); let authed = fetch_api_authed( @@ -539,7 +530,6 @@ authed, db.clone(), user_db, - rsmq.clone(), ws_trigger.workspace_id.clone(), StripPath(path.clone()), RunJobQuery::default(), @@ -552,7 +542,6 @@ authed, db.clone(), user_db, - rsmq.clone(), ws_trigger.workspace_id.clone(), StripPath(path.clone()), RunJobQuery::default(), @@ -616,7 +605,6 @@ async fn send_initial_messages( ws_trigger: &WebsocketTrigger, mut writer: SplitSink>, Message>, db: &DB, - rsmq: Option, ) -> error::Result<()> { let initial_messages: Vec = ws_trigger .initial_messages @@ -656,7 +644,6 @@ async fn send_initial_messages( ws_trigger, "init".to_string(), db, - rsmq.clone(), ) .await?; @@ -690,7 +677,6 @@ async fn get_url_from_runnable( is_flow: bool, ws_trigger: &WebsocketTrigger, db: &DB, - rsmq: Option, ) -> error::Result { tracing::info!("Running runnable {path} (is_flow: {is_flow}) to get websocket URL",); @@ -701,7 +687,6 @@ async fn get_url_from_runnable( ws_trigger, "url".to_string(), db, - rsmq, ) .await?; @@ -768,7 +753,6 @@ async fn disable_with_error(db: &DB, ws_trigger: &WebsocketTrigger, error: Strin async fn listen_to_websocket( ws_trigger: WebsocketTrigger, db: DB, - rsmq: Option, mut killpill_rx: tokio::sync::broadcast::Receiver<()>, ) -> () { if let None = update_ping(&db, &ws_trigger, Some("Connecting...")).await { @@ -797,7 +781,7 @@ async fn listen_to_websocket( )) => { return; }, - url_result = get_url_from_runnable(path, url.starts_with("$flow:"), &ws_trigger, &db, rsmq.clone()) => match url_result { + url_result = get_url_from_runnable(path, url.starts_with("$flow:"), &ws_trigger, &db) => match url_result { Ok(url) => url, Err(err) => { disable_with_error( @@ -850,7 +834,7 @@ async fn listen_to_websocket( return; } _ = async { - if let Err(err) = send_initial_messages(&ws_trigger, writer, &db, rsmq.clone()).await { + if let Err(err) = send_initial_messages(&ws_trigger, writer, &db).await { disable_with_error(&db, &ws_trigger, format!("Error sending initial messages: {:?}", err)).await; } else { // if initial messages sent successfully, wait forever @@ -897,7 +881,7 @@ async fn listen_to_websocket( } } if should_handle { - if let Err(err) = run_job(&db, rsmq.clone(), &ws_trigger, text).await { + if let Err(err) = run_job(&db, &ws_trigger, text).await { report_critical_error(format!("Failed to trigger job from websocket {}: {:?}", ws_trigger.url, err), db.clone(), Some(&ws_trigger.workspace_id), 
None).await; }; } @@ -948,12 +932,7 @@ async fn listen_to_websocket( } } -async fn run_job( - db: &DB, - rsmq: Option, - trigger: &WebsocketTrigger, - msg: String, -) -> anyhow::Result<()> { +async fn run_job(db: &DB, trigger: &WebsocketTrigger, msg: String) -> anyhow::Result<()> { let args = PushArgsOwned { args: HashMap::from([("msg".to_string(), to_raw_value(&msg))]), extra: Some(HashMap::from([( @@ -983,7 +962,6 @@ async fn run_job( authed, db.clone(), user_db, - rsmq, trigger.workspace_id.clone(), StripPath(trigger.script_path.to_owned()), run_query, @@ -996,7 +974,6 @@ async fn run_job( authed, db.clone(), user_db, - rsmq, trigger.workspace_id.clone(), StripPath(trigger.script_path.to_owned()), run_query, diff --git a/backend/windmill-api/src/workspaces.rs b/backend/windmill-api/src/workspaces.rs index 883a23dad70ae..14c7de5017137 100644 --- a/backend/windmill-api/src/workspaces.rs +++ b/backend/windmill-api/src/workspaces.rs @@ -60,7 +60,7 @@ use windmill_common::{ use windmill_git_sync::handle_deployment_metadata; #[cfg(feature = "enterprise")] -use crate::utils::require_admin_or_devops; +use windmill_common::utils::require_admin_or_devops; use crate::oauth2_ee::InstanceEvent; use crate::variables::{decrypt, encrypt}; @@ -503,7 +503,6 @@ async fn edit_slack_command( async fn run_slack_message_test_job( authed: ApiAuthed, Extension(db): Extension, - Extension(rsmq): Extension>, Path(w_id): Path, Json(req): Json, ) -> JsonResult { @@ -520,7 +519,6 @@ async fn run_slack_message_test_job( let uuid = windmill_queue::push_error_handler( &db, - rsmq, Uuid::parse_str("00000000-0000-0000-0000-000000000000")?, None, Some("slack_message_test".to_string()), @@ -600,11 +598,10 @@ async fn is_allowed_auto_domain(ApiAuthed { email, .. }: ApiAuthed) -> JsonResul async fn edit_auto_invite( authed: ApiAuthed, Extension(db): Extension, - Extension(rsmq): Extension>, Path(w_id): Path, Json(ea): Json, ) -> Result { - crate::workspaces_ee::edit_auto_invite(authed, db, rsmq, w_id, ea).await + crate::workspaces_ee::edit_auto_invite(authed, db, w_id, ea).await } async fn edit_webhook( @@ -1469,7 +1466,7 @@ async fn create_workspace( // nw.id, // "finland does not actually exist", // ) - // .execute(&mut tx) + // .execute(&mut *tx) // .await?; let automate_username_creation = sqlx::query_scalar!( @@ -1887,7 +1884,6 @@ async fn add_user( authed: ApiAuthed, Extension(db): Extension, Extension(webhook): Extension, - Extension(rsmq): Extension>, Path(w_id): Path, Json(mut nu): Json, ) -> Result<(StatusCode, String)> { @@ -1994,7 +1990,6 @@ async fn add_user( &w_id, windmill_git_sync::DeployedObject::User { email: nu.email.clone() }, Some(format!("Added user '{}' to workspace", &nu.email)), - rsmq, true, ) .await?; diff --git a/backend/windmill-api/src/workspaces_ee.rs b/backend/windmill-api/src/workspaces_ee.rs index 565a53b174f9c..d5fd2cdda1cec 100644 --- a/backend/windmill-api/src/workspaces_ee.rs +++ b/backend/windmill-api/src/workspaces_ee.rs @@ -6,7 +6,6 @@ use crate::{ pub async fn edit_auto_invite( _authed: ApiAuthed, _db: DB, - _rsmq: Option, _w_id: String, _ea: EditAutoInvite, ) -> windmill_common::error::Result { diff --git a/backend/windmill-autoscaling/Cargo.toml b/backend/windmill-autoscaling/Cargo.toml index 7aada6f904eef..fbebaf0fd779d 100644 --- a/backend/windmill-autoscaling/Cargo.toml +++ b/backend/windmill-autoscaling/Cargo.toml @@ -20,5 +20,4 @@ serde_json.workspace = true tracing.workspace = true windmill-common = { workspace = true, default-features = false } windmill-queue.workspace = 
true -rsmq_async.workspace = true anyhow.workspace = true \ No newline at end of file diff --git a/backend/windmill-git-sync/Cargo.toml b/backend/windmill-git-sync/Cargo.toml index 07ab7d3d8b305..d5a8412fae8af 100644 --- a/backend/windmill-git-sync/Cargo.toml +++ b/backend/windmill-git-sync/Cargo.toml @@ -20,5 +20,4 @@ serde_json.workspace = true tracing.workspace = true windmill-common = { workspace = true, default-features = false } windmill-queue.workspace = true -rsmq_async.workspace = true regex = "1.10.3" \ No newline at end of file diff --git a/backend/windmill-git-sync/src/git_sync_ee.rs b/backend/windmill-git-sync/src/git_sync_ee.rs index 4be2858c27cad..cc245d3d0c3af 100644 --- a/backend/windmill-git-sync/src/git_sync_ee.rs +++ b/backend/windmill-git-sync/src/git_sync_ee.rs @@ -2,14 +2,13 @@ use windmill_common::error::Result; use crate::{DeployedObject, DB}; -pub async fn handle_deployment_metadata<'c, R: rsmq_async::RsmqConnection + Send + Clone + 'c>( +pub async fn handle_deployment_metadata<'c>( _email: &str, _created_by: &str, _db: &DB, _w_id: &str, _obj: DeployedObject, _deployment_message: Option, - _rsmq: Option, _skip_db_insert: bool, ) -> Result<()> { // Git sync is an enterprise feature and not part of the open-source version diff --git a/backend/windmill-queue/Cargo.toml b/backend/windmill-queue/Cargo.toml index d0e9b073436d1..7ac744a8857ad 100644 --- a/backend/windmill-queue/Cargo.toml +++ b/backend/windmill-queue/Cargo.toml @@ -34,7 +34,6 @@ reqwest.workspace = true lazy_static.workspace = true prometheus = { workspace = true, optional = true } cron.workspace = true -rsmq_async.workspace = true tokio.workspace = true futures-core.workspace = true futures.workspace = true diff --git a/backend/windmill-queue/src/jobs.rs b/backend/windmill-queue/src/jobs.rs index 728b49386371c..39561175dd579 100644 --- a/backend/windmill-queue/src/jobs.rs +++ b/backend/windmill-queue/src/jobs.rs @@ -26,7 +26,6 @@ use reqwest::{ header::{HeaderMap, CONTENT_TYPE}, Client, StatusCode, }; -use rsmq_async::RsmqConnection; use serde::{ser::SerializeMap, Deserialize, Serialize}; use serde_json::{json, value::RawValue}; use sqlx::{types::Json, FromRow, Pool, Postgres, Transaction}; @@ -61,8 +60,7 @@ use windmill_common::{ utils::{not_found_if_none, report_critical_error, StripPath}, worker::{ to_raw_value, DEFAULT_TAGS_PER_WORKSPACE, DEFAULT_TAGS_WORKSPACES, - MIN_VERSION_IS_AT_LEAST_1_427, NO_LOGS, WORKER_CONFIG, WORKER_PULL_QUERIES, - WORKER_SUSPENDED_PULL_QUERY, + MIN_VERSION_IS_AT_LEAST_1_427, NO_LOGS, WORKER_PULL_QUERIES, WORKER_SUSPENDED_PULL_QUERY, }, DB, METRICS_ENABLED, }; @@ -76,10 +74,7 @@ use windmill_common::users::SUPERADMIN_SYNC_EMAIL; #[cfg(feature = "enterprise")] use windmill_common::worker::CLOUD_HOSTED; -use crate::{ - schedule::{get_schedule_opt, push_scheduled_job}, - QueueTransaction, -}; +use crate::schedule::{get_schedule_opt, push_scheduled_job}; #[cfg(feature = "prometheus")] lazy_static::lazy_static! 
{ @@ -151,14 +146,12 @@ pub async fn cancel_single_job<'c>( w_id: &str, mut tx: Transaction<'c, Postgres>, db: &Pool, - rsmq: Option, force_cancel: bool, ) -> error::Result<(Transaction<'c, Postgres>, Option)> { if force_cancel || (job_running.parent_job.is_none() && !job_running.running) { let username = username.to_string(); let w_id = w_id.to_string(); let db = db.clone(); - let rsmq = rsmq.clone(); let job_running = job_running.clone(); tokio::task::spawn(async move { let reason: String = reason @@ -178,7 +171,6 @@ pub async fn cancel_single_job<'c>( job_running.mem_peak.unwrap_or(0), Some(CanceledBy { username: Some(username.to_string()), reason: Some(reason) }), e, - rsmq.clone(), "server", false, #[cfg(feature = "benchmark")] @@ -204,11 +196,6 @@ pub async fn cancel_single_job<'c>( tracing::info!("Soft cancelling job {}", id); } } - if let Some(mut rsmq) = rsmq.clone() { - rsmq.change_message_visibility(&job_running.tag, &job_running.id.to_string(), 0) - .await - .map_err(|e| anyhow::anyhow!(e))?; - } Ok((tx, Some(job_running.id))) } @@ -220,7 +207,6 @@ pub async fn cancel_job<'c>( w_id: &str, mut tx: Transaction<'c, Postgres>, db: &Pool, - rsmq: Option, force_cancel: bool, require_anonymous: bool, ) -> error::Result<(Transaction<'c, Postgres>, Option)> { @@ -278,7 +264,6 @@ pub async fn cancel_job<'c>( w_id, tx, db, - rsmq.clone(), force_cancel, ) .await?; @@ -296,7 +281,6 @@ pub async fn cancel_job<'c>( w_id, tx, db, - rsmq.clone(), force_cancel, ) .await?; @@ -346,29 +330,16 @@ pub async fn cancel_persistent_script_jobs<'c>( script_path: &str, w_id: &str, db: &Pool, - rsmq: Option, ) -> error::Result> { // There can be only one perpetual script run per 10 seconds window. We execute the cancel twice with 5s interval // to avoid the case of a job _about to be restarted_ when the first cancel is run. 
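The hunk below only reflows this two-pass cancel; the behavior is unchanged. A self-contained sketch of the pattern the comment describes (cancel_batch is an illustrative stand-in for cancel_persistent_script_jobs_internal):

use std::future::Future;
use tokio::time::{sleep, Duration};
use uuid::Uuid;

async fn cancel_twice<F, Fut>(mut cancel_batch: F) -> Vec<Uuid>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = Vec<Uuid>>,
{
    // First sweep cancels whatever is currently queued or running...
    let first = cancel_batch().await;
    // ...and a second sweep 5s later catches a perpetual run that was
    // re-pushed just as the first sweep executed (one run per 10s window).
    sleep(Duration::from_secs(5)).await;
    let second = cancel_batch().await;
    first.into_iter().chain(second).collect()
}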
- let cancelled_job_ids_first_batch = cancel_persistent_script_jobs_internal( - username, - reason.clone(), - script_path, - w_id, - db, - rsmq.clone(), - ) - .await?; + let cancelled_job_ids_first_batch = + cancel_persistent_script_jobs_internal(username, reason.clone(), script_path, w_id, db) + .await?; sleep(std::time::Duration::from_secs(5)).await; - let cancelled_job_ids_second_batch = cancel_persistent_script_jobs_internal( - username, - reason.clone(), - script_path, - w_id, - db, - rsmq.clone(), - ) - .await?; + let cancelled_job_ids_second_batch = + cancel_persistent_script_jobs_internal(username, reason.clone(), script_path, w_id, db) + .await?; return Ok(cancelled_job_ids_first_batch .into_iter() .chain(cancelled_job_ids_second_batch) @@ -381,7 +352,6 @@ async fn cancel_persistent_script_jobs_internal<'c>( script_path: &str, w_id: &str, db: &Pool, - rsmq: Option, ) -> error::Result> { let mut tx = db.begin().await?; @@ -403,7 +373,6 @@ async fn cancel_persistent_script_jobs_internal<'c>( w_id, tx, db, - rsmq.clone(), false, false, ) @@ -485,13 +454,12 @@ where } #[instrument(level = "trace", skip_all)] -pub async fn add_completed_job_error( +pub async fn add_completed_job_error( db: &Pool, queued_job: &QueuedJob, mem_peak: i32, canceled_by: Option, e: serde_json::Value, - rsmq: Option, _worker_name: &str, flow_is_done: bool, #[cfg(feature = "benchmark")] bench: &mut windmill_common::bench::BenchmarkIter, @@ -530,7 +498,6 @@ pub async fn add_completed_job_error( +pub async fn add_completed_job( db: &Pool, queued_job: &QueuedJob, success: bool, @@ -555,7 +519,6 @@ pub async fn add_completed_job< result: Json<&T>, mem_peak: i32, canceled_by: Option, - rsmq: Option, flow_is_done: bool, #[cfg(feature = "benchmark")] bench: &mut windmill_common::bench::BenchmarkIter, ) -> Result { @@ -569,7 +532,7 @@ pub async fn add_completed_job< )); } - let mut tx: QueueTransaction<'_, R> = (rsmq.clone(), db.begin().await?).into(); + let mut tx = db.begin().await?; let job_id = queued_job.id; // tracing::error!("1 {:?}", start.elapsed()); @@ -673,7 +636,7 @@ pub async fn add_completed_job< queued_job.tag, queued_job.priority, ) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await .map_err(|e| Error::InternalErr(format!("Could not add completed job {job_id}: {e:#}")))?; // tracing::error!("2 {:?}", start.elapsed()); @@ -689,7 +652,7 @@ pub async fn add_completed_job< &queued_job.id, &queued_job.workspace_id ) - .execute(&mut tx) + .execute(&mut *tx) .await { tracing::error!("Could not update job duration: {}", e); } @@ -702,7 +665,7 @@ pub async fn add_completed_job< parent_job, &queued_job.workspace_id ) - .execute(&mut tx) + .execute(&mut *tx) .await { tracing::error!("Could not update parent job flow_status: {}", e); } @@ -726,14 +689,14 @@ pub async fn add_completed_job< parent_job, &queued_job.workspace_id ) - .execute(&mut tx) + .execute(&mut *tx) .await?; if flow_is_done { let r = sqlx::query_scalar!( "UPDATE parallel_monitor_lock SET last_ping = now() WHERE parent_flow_id = $1 and job_id = $2 RETURNING 1", parent_job, &queued_job.id - ).fetch_optional(&mut tx).await?; + ).fetch_optional(&mut *tx).await?; if r.is_some() { tracing::info!( "parallel flow iteration is done, setting parallel monitor last ping lock for job {}", @@ -747,12 +710,8 @@ pub async fn add_completed_job< let schedule_path = queued_job.schedule_path.as_ref().unwrap(); let script_path = queued_job.script_path.as_ref().unwrap(); - let schedule = get_schedule_opt( - tx.transaction_mut(), - &queued_job.workspace_id, - 
schedule_path, - ) - .await?; + let schedule = + get_schedule_opt(&mut tx, &queued_job.workspace_id, schedule_path).await?; if let Some(schedule) = schedule { #[cfg(feature = "enterprise")] @@ -774,7 +733,6 @@ pub async fn add_completed_job< if schedule_next_tick { if let Err(err) = handle_maybe_scheduled_job( - rsmq.clone(), db, queued_job, &schedule, @@ -793,7 +751,6 @@ pub async fn add_completed_job< #[cfg(feature = "enterprise")] if let Err(err) = apply_schedule_handlers( - rsmq.clone(), db, &schedule, script_path, @@ -812,8 +769,7 @@ pub async fn add_completed_job< let w_id: &String = &queued_job.workspace_id; if !matches!(err, Error::QuotaExceeded(_)) { report_error_to_workspace_handler_or_critical_side_channel( - rsmq.clone(), - &queued_job, + &queued_job, db, format!( "Failed to push schedule error handler job to handle failed job ({base_url}/run/{}?workspace={w_id}): {}", @@ -853,7 +809,7 @@ pub async fn add_completed_job< concurrency_key, queued_job.id.hyphenated().to_string(), ) - .execute(&mut tx) + .execute(&mut *tx) .await { tracing::error!("Could not decrement concurrency counter: {}", e); @@ -863,7 +819,7 @@ pub async fn add_completed_job< "UPDATE concurrency_key SET ended_at = now() WHERE job_id = $1", queued_job.id, ) - .execute(&mut tx) + .execute(&mut *tx) .await .map_err(|e| { Error::InternalErr(format!( @@ -877,7 +833,7 @@ pub async fn add_completed_job< if JOB_TOKEN.is_none() { sqlx::query!("DELETE FROM job_perms WHERE job_id = $1", job_id) - .execute(&mut tx) + .execute(&mut *tx) .await?; } @@ -968,7 +924,6 @@ pub async fn add_completed_job< let base_url = BASE_URL.read().await; let w_id = &queued_job.workspace_id; report_error_to_workspace_handler_or_critical_side_channel( - rsmq.clone(), &queued_job, db, format!( @@ -1002,9 +957,7 @@ pub async fn add_completed_job< "Sending error of job {} to error handlers (if any)", queued_job.id ); - if let Err(e) = - send_error_to_global_handler(rsmq.clone(), &queued_job, db, Json(&result)).await - { + if let Err(e) = send_error_to_global_handler(&queued_job, db, Json(&result)).await { tracing::error!( "Could not run global error handler for job {}: {}", &queued_job.id, @@ -1013,7 +966,6 @@ pub async fn add_completed_job< } if let Err(err) = send_error_to_workspace_handler( - rsmq.clone(), &queued_job, canceled_by.is_some(), db, @@ -1051,7 +1003,7 @@ pub async fn add_completed_job< .unwrap_or(false); if p { - let tx = PushIsolationLevel::IsolatedRoot(db.clone(), rsmq); + let tx = PushIsolationLevel::IsolatedRoot(db.clone()); // perpetual jobs can run one job per 10s max. 
If the job was faster than 10s, schedule the next one with the appropriate delay let now = chrono::Utc::now(); @@ -1123,12 +1075,7 @@ pub async fn add_completed_job< Ok(queued_job.id) } -pub async fn send_error_to_global_handler< - 'a, - T: Serialize + Send + Sync, - R: rsmq_async::RsmqConnection + Clone + Send, ->( - rsmq: Option, +pub async fn send_error_to_global_handler<'a, T: Serialize + Send + Sync>( queued_job: &QueuedJob, db: &Pool, result: Json<&T>, @@ -1143,7 +1090,6 @@ pub async fn send_error_to_global_handler< }; push_error_handler( db, - rsmq, queued_job.id, queued_job.schedule_path.clone(), queued_job.script_path.clone(), @@ -1165,10 +1111,7 @@ pub async fn send_error_to_global_handler< Ok(()) } -pub async fn report_error_to_workspace_handler_or_critical_side_channel< - R: rsmq_async::RsmqConnection + Clone + Send, ->( - rsmq: Option, +pub async fn report_error_to_workspace_handler_or_critical_side_channel( queued_job: &QueuedJob, db: &Pool, error_message: String, @@ -1186,7 +1129,6 @@ pub async fn report_error_to_workspace_handler_or_critical_side_channel< if let Some(error_handler) = error_handler { if let Err(err) = push_error_handler( db, - rsmq, queued_job.id, queued_job.schedule_path.clone(), queued_job.script_path.clone(), @@ -1220,13 +1162,7 @@ pub async fn report_error_to_workspace_handler_or_critical_side_channel< } } -pub async fn send_error_to_workspace_handler< - 'a, - 'c, - T: Serialize + Send + Sync, - R: rsmq_async::RsmqConnection + Clone + Send, ->( - rsmq: Option, +pub async fn send_error_to_workspace_handler<'a, 'c, T: Serialize + Send + Sync>( queued_job: &QueuedJob, is_canceled: bool, db: &Pool, @@ -1274,7 +1210,6 @@ pub async fn send_error_to_workspace_handler< push_error_handler( db, - rsmq, queued_job.id, queued_job.schedule_path.clone(), queued_job.script_path.clone(), @@ -1301,8 +1236,7 @@ use backon::ConstantBuilder; use backon::{BackoffBuilder, Retryable}; #[instrument(level = "trace", skip_all)] -pub async fn handle_maybe_scheduled_job<'c, R: rsmq_async::RsmqConnection + Clone + Send + 'c>( - rsmq: Option, +pub async fn handle_maybe_scheduled_job<'c>( db: &Pool, job: &QueuedJob, schedule: &Schedule, @@ -1318,7 +1252,7 @@ pub async fn handle_maybe_scheduled_job<'c, R: rsmq_async::RsmqConnection + Clon if schedule.enabled && script_path == schedule.script_path { let push_next_job_future = (|| { tokio::time::timeout(std::time::Duration::from_secs(5), async { - let mut tx: QueueTransaction<'_, _> = (rsmq.clone(), db.begin().await?).into(); + let mut tx = db.begin().await?; tx = push_scheduled_job(db, tx, &schedule, None).await?; tx.commit().await?; Ok::<(), Error>(()) @@ -1358,7 +1292,7 @@ pub async fn handle_maybe_scheduled_job<'c, R: rsmq_async::RsmqConnection + Clon match err { Ok(Err(Error::QuotaExceeded(_))) => {} _ => { - report_error_to_workspace_handler_or_critical_side_channel(rsmq, job, db, + report_error_to_workspace_handler_or_critical_side_channel(job, db, format!("Could not schedule next job for {} with err {}. Schedule disabled", schedule.path, err_str) ).await; } @@ -1368,7 +1302,7 @@ pub async fn handle_maybe_scheduled_job<'c, R: rsmq_async::RsmqConnection + Clon Err(disable_err) => match err { Ok(Err(err2 @ Error::QuotaExceeded(_))) => Err(err2), _ => { - report_error_to_workspace_handler_or_critical_side_channel(rsmq, job, db, + report_error_to_workspace_handler_or_critical_side_channel(job, db, format!("Could not schedule next job for {} and could not disable schedule with err {}. 
Will retry", schedule.path, disable_err) ).await; Err(to_anyhow(disable_err).into()) @@ -1400,13 +1334,7 @@ struct CompletedJobSubset { } #[cfg(feature = "enterprise")] -async fn apply_schedule_handlers< - 'a, - 'c, - T: Serialize + Send + Sync, - R: rsmq_async::RsmqConnection + Clone + Send + 'c, ->( - rsmq: Option, +async fn apply_schedule_handlers<'a, 'c, T: Serialize + Send + Sync>( db: &Pool, schedule: &Schedule, script_path: &str, @@ -1448,7 +1376,6 @@ async fn apply_schedule_handlers< push_error_handler( db, - rsmq, job_id, Some(schedule.path.to_string()), Some(script_path.to_string()), @@ -1470,7 +1397,6 @@ async fn apply_schedule_handlers< if let Some(ref on_success_path) = schedule.on_success { handle_successful_schedule( db, - rsmq.clone(), job_id, &schedule.path, script_path, @@ -1485,7 +1411,7 @@ async fn apply_schedule_handlers< } if let Some(ref on_recovery_path) = schedule.on_recovery.clone() { - let tx: QueueTransaction<'_, R> = (rsmq.clone(), db.begin().await?).into(); + let tx = db.begin().await?; let times = schedule.on_recovery_times.unwrap_or(1).max(1); let past_jobs = sqlx::query_as::<_, CompletedJobSubset>( "SELECT success, result, started_at FROM completed_job WHERE workspace_id = $1 AND schedule_path = $2 AND script_path = $3 AND id != $4 ORDER BY created_at DESC LIMIT $5", @@ -1535,14 +1461,8 @@ async fn apply_schedule_handlers< Ok(()) } -pub async fn push_error_handler< - 'a, - 'c, - T: Serialize + Send + Sync, - R: rsmq_async::RsmqConnection + Clone + Send + 'c, ->( +pub async fn push_error_handler<'a, 'c, T: Serialize + Send + Sync>( db: &Pool, - rsmq: Option, job_id: Uuid, schedule_path: Option, script_path: Option, @@ -1592,7 +1512,7 @@ pub async fn push_error_handler< let result = sanitize_result(result); - let tx = PushIsolationLevel::IsolatedRoot(db.clone(), rsmq); + let tx = PushIsolationLevel::IsolatedRoot(db.clone()); let (uuid, tx) = push( &db, tx, @@ -1653,14 +1573,9 @@ fn sanitize_result(result: Json<&T>) -> HashMap( +async fn handle_recovered_schedule<'a, 'c, T: Serialize + Send + Sync>( db: &Pool, - tx: QueueTransaction<'c, R>, + tx: Transaction<'c, Postgres>, job_id: Uuid, schedule_path: &str, script_path: &str, @@ -1746,14 +1661,8 @@ async fn handle_recovered_schedule< } #[cfg(feature = "enterprise")] -async fn handle_successful_schedule< - 'a, - 'c, - T: Serialize + Send + Sync, - R: rsmq_async::RsmqConnection + Clone + Send + 'c, ->( +async fn handle_successful_schedule<'a, 'c, T: Serialize + Send + Sync>( db: &Pool, - rsmq: Option, job_id: Uuid, schedule_path: &str, script_path: &str, @@ -1791,7 +1700,7 @@ async fn handle_successful_schedule< } } - let tx = PushIsolationLevel::IsolatedRoot(db.clone(), rsmq); + let tx = PushIsolationLevel::IsolatedRoot(db.clone()); let (uuid, tx) = push( &db, tx, @@ -1842,18 +1751,13 @@ impl std::ops::Deref for PulledJob { } } -pub async fn pull( +pub async fn pull( db: &Pool, - rsmq: Option, suspend_first: bool, ) -> windmill_common::error::Result<(Option, bool)> { loop { - let (job, suspended) = pull_single_job_and_mark_as_running_no_concurrency_limit( - db, - rsmq.clone(), - suspend_first, - ) - .await?; + let (job, suspended) = + pull_single_job_and_mark_as_running_no_concurrency_limit(db, suspend_first).await?; let Some(job) = job else { return Ok((None, suspended)); @@ -1879,9 +1783,7 @@ pub async fn pull( return Ok((Option::Some(pulled_job), suspended)); } - let itx = db.begin().await?; - - let mut tx: QueueTransaction<'_, _> = (rsmq.clone(), itx).into(); + let mut tx = db.begin().await?; // Else the 
job is subject to concurrency limits let job_script_path = pulled_job.script_path.clone().unwrap(); @@ -1913,7 +1815,7 @@ pub async fn pull( "SELECT null FROM queue WHERE id = $1 FOR UPDATE", pulled_job.id ) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await .context("lock job in queue")?; @@ -1930,7 +1832,7 @@ pub async fn pull( jobs_uuids_init_json_value, pulled_job.id.hyphenated().to_string(), ) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await .map_err(|e| { Error::InternalErr(format!( @@ -1943,7 +1845,7 @@ pub async fn pull( "SELECT COUNT(*) as count, COALESCE(MAX(ended_at), now() - INTERVAL '1 second' * $2) as max_ended_at FROM concurrency_key WHERE key = $1 AND ended_at >= (now() - INTERVAL '1 second' * $2)", job_concurrency_key, f64::from(job_custom_concurrency_time_window_s), - ).fetch_one(&mut tx).await.map_err(|e| { + ).fetch_one(&mut *tx).await.map_err(|e| { Error::InternalErr(format!( "Error getting completed count for key {job_concurrency_key}: {e:#}" )) @@ -1957,7 +1859,7 @@ pub async fn pull( &pulled_job.workspace_id, completed_count.max_ended_at ) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await .map_err(|e| { Error::InternalErr(format!( @@ -1985,7 +1887,7 @@ pub async fn pull( pulled_job.id.hyphenated().to_string(), ) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await .map_err(|e| { Error::InternalErr(format!( @@ -2003,7 +1905,7 @@ pub async fn pull( DESC LIMIT 10) AS t", job_concurrency_key ) - .fetch_one(&mut tx) + .fetch_one(&mut *tx) .await?; tracing::info!("avg script duration computed: {:?}", avg_script_duration); @@ -2035,7 +1937,7 @@ pub async fn pull( job_concurrency_key, estimated_next_schedule_timestamp, nestimated - ).fetch_optional(&mut tx).await?.flatten().unwrap_or(0) as i32; + ).fetch_optional(&mut *tx).await?.flatten().unwrap_or(0) as i32; tracing::info!("estimated_next_schedule_timestamp: {:?}, jobs_in_window: {jobs_in_window}, nestimated: {nestimated}, inc: {inc}", estimated_next_schedule_timestamp); if jobs_in_window < job_custom_concurrent_limit { break; @@ -2051,35 +1953,9 @@ pub async fn pull( "\nRe-scheduled job to {estimated_next_schedule_timestamp} due to concurrency limits with key {job_concurrency_key} and limit {job_custom_concurrent_limit} in the last {job_custom_concurrency_time_window_s} seconds", ); let _ = append_logs(&job_uuid, &pulled_job.workspace_id, job_log_event, db).await; - if rsmq.is_some() { - // if let Some(ref mut rsmq) = tx.rsmq { - // if using redis, only one message at a time can be poped from the queue. Process only this message and move to the next elligible job - // In this case, the job might be a job from the same script path, but we can't optimise this further - // if using posgtres, then we're able to re-queue the entire batch of scheduled job for this script_path, so we do it - let requeued_job_tag = sqlx::query_scalar::<_, String>(&format!( - "UPDATE queue - SET running = false - , started_at = null - , scheduled_for = '{estimated_next_schedule_timestamp}' - , last_ping = null - WHERE id = '{job_uuid}' - RETURNING tag" - )) - .fetch_one(&mut tx) - .await - .map_err(|e| Error::InternalErr(format!("Could not update and re-queue job {job_uuid}. 
The job will be marked as running but it is not running: {e:#}")))?; - if let Some(ref mut rsmq) = tx.rsmq { - rsmq.send_message( - job_uuid.to_bytes_le().to_vec(), - Option::Some(estimated_next_schedule_timestamp), - requeued_job_tag, - ); - } - tx.commit().await?; - } else { - // if using posgtres, then we're able to re-queue the entire batch of scheduled job for this script_path, so we do it - sqlx::query!( + // if using posgtres, then we're able to re-queue the entire batch of scheduled job for this script_path, so we do it + sqlx::query!( "UPDATE queue SET running = false , started_at = null @@ -2089,85 +1965,18 @@ pub async fn pull( estimated_next_schedule_timestamp, job_uuid, ) - .fetch_all(&mut tx) + .fetch_all(&mut *tx) .await .map_err(|e| Error::InternalErr(format!("Could not update and re-queue job {job_uuid}. The job will be marked as running but it is not running: {e:#}")))?; - tx.commit().await? - } + tx.commit().await? } } -async fn pull_single_job_and_mark_as_running_no_concurrency_limit< - 'c, - R: rsmq_async::RsmqConnection + Send + Clone, ->( +async fn pull_single_job_and_mark_as_running_no_concurrency_limit<'c>( db: &Pool, - rsmq: Option, suspend_first: bool, ) -> windmill_common::error::Result<(Option, bool)> { - let job_and_suspended: (Option, bool) = if let Some(mut rsmq) = rsmq { - #[cfg(feature = "benchmark")] - let instant = Instant::now(); - - // TODO: REDIS: Race conditions / replace last_ping - - // TODO: shuffle this list to have fairness - let mut all_tags = WORKER_CONFIG.read().await.worker_tags.clone(); - - let mut msg: Option<_> = None; - let mut tag = None; - - while msg.is_none() && !all_tags.is_empty() { - let ntag = all_tags.pop().unwrap(); - tag = Some(ntag.clone()); - msg = rsmq - .receive_message::>(&ntag, Some(10)) - .await - .map_err(|e| anyhow::anyhow!(e))?; - } - - // #[cfg(feature = "benchmark")] - // println!("rsmq 1: {:?}", instant.elapsed()); - - // println!("3.1: {:?} {rs}", instant.elapsed()); - if let Some(msg) = msg { - let uuid = Uuid::from_bytes_le( - msg.message - .try_into() - .map_err(|_| anyhow::anyhow!("Failed to parsed Redis message"))?, - ); - - let m2r = sqlx::query_as::<_, PulledJob>( - "UPDATE queue - SET running = true - , started_at = coalesce(started_at, now()) - , last_ping = now() - , suspend_until = null - WHERE id = $1 - RETURNING id, workspace_id, parent_job, created_by, created_at, started_at, scheduled_for, - running, script_hash, script_path, args, right(logs, 900000) as logs, canceled, canceled_by, - canceled_reason, last_ping, job_kind, schedule_path, permissioned_as, - flow_status, is_flow_step, language, suspend, suspend_until, - same_worker, pre_run_error, email, visible_to_owner, mem_peak, - root_job, leaf_jobs, tag, concurrent_limit, concurrency_time_window_s, - timeout, flow_step_id, cache_ttl, priority, raw_code, raw_lock, raw_flow", - ) - .bind(uuid) - .fetch_optional(db) - .await?; - - rsmq.delete_message(&tag.unwrap(), &msg.id) - .await - .map_err(|e| anyhow::anyhow!(e))?; - - #[cfg(feature = "benchmark")] - println!("rsmq 2: {:?}", instant.elapsed()); - - (m2r, false) - } else { - (None, false) - } - } else { + let job_and_suspended: (Option, bool) = { /* Jobs can be started if they: * - haven't been started before, * running = false @@ -2600,11 +2409,11 @@ async fn extract_result_from_job_result( } #[instrument(level = "trace", skip_all)] -pub async fn delete_job<'c, R: rsmq_async::RsmqConnection + Clone + Send>( - mut tx: QueueTransaction<'c, R>, +pub async fn delete_job<'c>( + mut tx: 
@@ -2600,11 +2409,11 @@ async fn extract_result_from_job_result(
 }

 #[instrument(level = "trace", skip_all)]
-pub async fn delete_job<'c, R: rsmq_async::RsmqConnection + Clone + Send>(
-    mut tx: QueueTransaction<'c, R>,
+pub async fn delete_job<'c>(
+    mut tx: Transaction<'c, Postgres>,
     w_id: &str,
     job_id: Uuid,
-) -> windmill_common::error::Result<QueueTransaction<'c, R>> {
+) -> windmill_common::error::Result<Transaction<'c, Postgres>> {
     #[cfg(feature = "prometheus")]
     if METRICS_ENABLED.load(std::sync::atomic::Ordering::Relaxed) {
         QUEUE_DELETE_COUNT.inc();
@@ -2615,7 +2424,7 @@ pub async fn delete_job<'c, R: rsmq_async::RsmqConnection + Clone + Send>(
         w_id,
         job_id
     )
-    .fetch_optional(&mut tx)
+    .fetch_optional(&mut *tx)
     .await;

     if let Err(job_removed) = job_removed {
@@ -2672,29 +2481,29 @@ pub async fn get_queued_job(id: &Uuid, w_id: &str, db: &DB) -> error::Result<Option<QueuedJob>> {

-pub enum PushIsolationLevel<'c, R: rsmq_async::RsmqConnection> {
-    IsolatedRoot(DB, Option<R>),
-    Isolated(UserDB, Authed, Option<R>),
-    Transaction(QueueTransaction<'c, R>),
+pub enum PushIsolationLevel<'c> {
+    IsolatedRoot(DB),
+    Isolated(UserDB, Authed),
+    Transaction(Transaction<'c, Postgres>),
 }

 #[macro_export]
 macro_rules! fetch_scalar_isolated {
     ( $query:expr, $tx:expr) => {
         match $tx {
-            PushIsolationLevel::IsolatedRoot(db, rmsq) => {
+            PushIsolationLevel::IsolatedRoot(db) => {
                 let r = $query.fetch_optional(&db).await;
-                $tx = PushIsolationLevel::IsolatedRoot(db, rmsq);
+                $tx = PushIsolationLevel::IsolatedRoot(db);
                 r
             }
-            PushIsolationLevel::Isolated(db, user, rsmq) => {
+            PushIsolationLevel::Isolated(db, user) => {
                 let mut ntx = db.clone().begin(&user).await?;
                 let r = $query.fetch_optional(&mut *ntx).await;
-                $tx = PushIsolationLevel::Isolated(db, user, rsmq);
+                $tx = PushIsolationLevel::Isolated(db, user);
                 r
             }
             PushIsolationLevel::Transaction(mut tx) => {
-                let r = $query.fetch_optional(&mut tx).await;
+                let r = $query.fetch_optional(&mut *tx).await;
                 $tx = PushIsolationLevel::Transaction(tx);
                 r
             }
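With the Redis slots gone, `PushIsolationLevel` only decides which connection runs the SQL, as the reworked `fetch_scalar_isolated!` arms show. A hedged sketch of the same match shape outside the macro — the enum here is a stripped-down stand-in, not the crate's full definition (which also carries an `Isolated(UserDB, Authed)` arm):

use sqlx::{Pool, Postgres, Transaction};

enum IsolationSketch<'c> {
    IsolatedRoot(Pool<Postgres>),
    Transaction(Transaction<'c, Postgres>),
}

async fn queued_count(lvl: &mut IsolationSketch<'_>) -> Result<Option<i64>, sqlx::Error> {
    let q = sqlx::query_scalar::<_, i64>("SELECT count(*) FROM queue");
    match lvl {
        // a pool reference is itself an Executor...
        IsolationSketch::IsolatedRoot(db) => q.fetch_optional(&*db).await,
        // ...while a borrowed transaction is executed through its connection
        IsolationSketch::Transaction(tx) => q.fetch_optional(&mut **tx).await,
    }
}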
@@ -3106,9 +2915,9 @@ struct FlowRawValue {
 }

 // #[instrument(level = "trace", skip_all)]
-pub async fn push<'c, 'd, R: rsmq_async::RsmqConnection + Send + 'c>(
+pub async fn push<'c, 'd>(
     _db: &Pool<Postgres>,
-    mut tx: PushIsolationLevel<'c, R>,
+    mut tx: PushIsolationLevel<'c>,
     workspace_id: &str,
     job_payload: JobPayload,
     mut args: PushArgs<'d>,
@@ -3129,7 +2938,7 @@ pub async fn push<'c, 'd, R: rsmq_async::RsmqConnection + Send + 'c>(
     flow_step_id: Option<String>,
     _priority_override: Option<i16>,
     authed: Option<&Authed>,
-) -> Result<(Uuid, QueueTransaction<'c, R>), Error> {
+) -> Result<(Uuid, Transaction<'c, Postgres>), Error> {
     #[cfg(feature = "cloud")]
     if *CLOUD_HOSTED {
         let premium_workspace =
@@ -3908,10 +3717,8 @@ pub async fn push<'c, 'd, R: rsmq_async::RsmqConnection + Send + 'c>(
     };

     let mut tx = match tx {
-        PushIsolationLevel::Isolated(user_db, authed, rsmq) => {
-            (rsmq, user_db.begin(&authed).await?).into()
-        }
-        PushIsolationLevel::IsolatedRoot(db, rsmq) => (rsmq, db.begin().await?).into(),
+        PushIsolationLevel::Isolated(user_db, authed) => (user_db.begin(&authed).await?).into(),
+        PushIsolationLevel::IsolatedRoot(db) => db.begin().await?,
         PushIsolationLevel::Transaction(tx) => tx,
     };

@@ -3920,7 +3727,7 @@ pub async fn push<'c, 'd, R: rsmq_async::RsmqConnection + Send + 'c>(
         "SELECT 1 FROM queue WHERE id = $1 UNION ALL select 1 FROM completed_job WHERE id = $1",
         job_id
     )
-    .fetch_optional(&mut tx)
+    .fetch_optional(&mut *tx)
     .await?;

     if conflicting_id.is_some() {
@@ -3947,7 +3754,7 @@ pub async fn push<'c, 'd, R: rsmq_async::RsmqConnection + Send + 'c>(
         concurrency_key,
         job_id,
     )
-    .execute(&mut tx)
+    .execute(&mut *tx)
     .await
     .map_err(|e| Error::InternalErr(format!("Could not insert concurrency_key={concurrency_key} for job_id={job_id} script_path={script_path:?} workspace_id={workspace_id}: {e:#}")))?;
 }
@@ -3974,7 +3781,7 @@ pub async fn push<'c, 'd, R: rsmq_async::RsmqConnection + Send + 'c>(
     raw_flow.as_ref() as Option<&Json>,
     tag,
 )
-.execute(&mut tx)
+.execute(&mut *tx)
 .await?;

 let (raw_code, raw_lock, raw_flow) = if !*MIN_VERSION_IS_AT_LEAST_1_427.read().await {
@@ -4024,7 +3831,7 @@ pub async fn push<'c, 'd, R: rsmq_async::RsmqConnection + Send + 'c>(
     cache_ttl,
     final_priority,
 )
-.fetch_one(&mut tx)
+.fetch_one(&mut *tx)
 .await
 .map_err(|e| Error::InternalErr(format!("Could not insert into queue {job_id} with tag {tag}, schedule_path {schedule_path:?}, script_path: {script_path:?}, email {email}, workspace_id {workspace_id}: {e:#}")))?;

@@ -4079,7 +3886,7 @@ pub async fn push<'c, 'd, R: rsmq_async::RsmqConnection + Send + 'c>(
     folders.as_slice(),
     job_authed.groups.as_slice(),
     workspace_id,
- ).execute(&mut tx).await {
+ ).execute(&mut *tx).await {
     tracing::error!("Could not insert job_perms for job {job_id}: {err:#}");
 }
 }
@@ -4129,7 +3936,7 @@ pub async fn push<'c, 'd, R: rsmq_async::RsmqConnection + Send + 'c>(
     }

     audit_log(
-        &mut tx,
+        &mut *tx,
         &audit_author,
         operation_name,
         ActionKind::Execute,
@@ -4140,9 +3947,6 @@ pub async fn push<'c, 'd, R: rsmq_async::RsmqConnection + Send + 'c>(
     .instrument(tracing::info_span!("job_run", email = &email))
     .await?;
 }
-    if let Some(ref mut rsmq) = tx.rsmq {
-        rsmq.send_message(job_id.to_bytes_le().to_vec(), scheduled_for_o, tag);
-    }

 Ok((uuid, tx))
 }
diff --git a/backend/windmill-queue/src/lib.rs b/backend/windmill-queue/src/lib.rs
index bb29ee9aac28b..b47a3c32383f6 100644
--- a/backend/windmill-queue/src/lib.rs
+++ b/backend/windmill-queue/src/lib.rs
@@ -7,8 +7,6 @@
  */

 mod jobs;
-mod queue_transaction;
 pub mod schedule;

 pub use jobs::*;
-pub use queue_transaction::*;
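For context on the file deleted next: `queue_transaction.rs` paired the Postgres transaction with a buffer of pending Redis operations, and its `commit()` flushed the Redis ops only after the SQL commit succeeded, so a rolled-back transaction never left phantom queue messages. A hedged, dependency-free sketch of that ordering idea (all names hypothetical):

// Buffer side-channel ops; replay them only after the primary commit.
struct BufferedTx<Tx, Op> {
    tx: Tx,
    queued_ops: Vec<Op>,
}

impl<Tx, Op> BufferedTx<Tx, Op> {
    fn queue(&mut self, op: Op) {
        self.queued_ops.push(op);
    }
    fn commit(self, commit_tx: impl FnOnce(Tx), apply: impl FnMut(Op)) {
        commit_tx(self.tx); // commit Postgres first...
        self.queued_ops.into_iter().for_each(apply); // ...then replay the Redis ops
    }
}

With Postgres as the only queue backend, that two-phase bookkeeping — and the hand-rolled `sqlx::Executor` forwarding visible below — has no remaining purpose.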
diff --git a/backend/windmill-queue/src/queue_transaction.rs b/backend/windmill-queue/src/queue_transaction.rs
deleted file mode 100644
index 0ec265e057aab..0000000000000
--- a/backend/windmill-queue/src/queue_transaction.rs
+++ /dev/null
@@ -1,169 +0,0 @@
-use std::{fmt::Debug, future::Future, pin::Pin};
-
-use futures_core::{future::BoxFuture, stream::BoxStream};
-use rsmq_async::{RedisBytes, RsmqConnection};
-use sqlx::{Database, Postgres, Transaction};
-
-pub enum RedisOp {
-    SendMessage(RedisBytes, Option<chrono::DateTime<chrono::Utc>>, String),
-    DeleteMessage(String, String),
-}
-
-unsafe impl Send for RedisOp {}
-
-impl RedisOp {
-    pub async fn apply<R: RsmqConnection>(self, rsmq: &mut R) -> Result<(), rsmq_async::RsmqError> {
-        match self {
-            RedisOp::SendMessage(bytes, time, queue) => {
-                rsmq.send_message(
-                    &queue,
-                    bytes,
-                    time.map(|t| (t - chrono::Utc::now()).num_seconds())
-                        .and_then(|e| e.try_into().ok()),
-                )
-                .await?;
-            }
-            RedisOp::DeleteMessage(id, queue) => {
-                rsmq.delete_message(&queue, &id).await?;
-            }
-        };
-
-        Ok(())
-    }
-}
-
-pub struct RedisTransaction<R: RsmqConnection> {
-    rsmq: R,
-    queued_ops: Vec<RedisOp>,
-}
-
-impl<R: RsmqConnection> From<R> for RedisTransaction<R> {
-    fn from(value: R) -> Self {
-        Self { rsmq: value, queued_ops: Vec::new() }
-    }
-}
-
-impl<R: RsmqConnection> RedisTransaction<R> {
-    pub async fn commit(self) -> Result<(), rsmq_async::RsmqError> {
-        let mut rsmq = self.rsmq;
-        for op in self.queued_ops {
-            op.apply(&mut rsmq).await?;
-        }
-        Ok(())
-    }
-
-    pub fn send_message<E: Into<RedisBytes>>(
-        &mut self,
-        bytes: E,
-        delay_until: Option<chrono::DateTime<chrono::Utc>>,
-        queue: String,
-    ) {
-        self.queued_ops
-            .push(RedisOp::SendMessage(bytes.into(), delay_until, queue))
-    }
-
-    pub fn delete_message(&mut self, id: String, queue: String) {
-        self.queued_ops.push(RedisOp::DeleteMessage(id, queue))
-    }
-}
-
-pub struct QueueTransaction<'c, R: RsmqConnection> {
-    pub rsmq: Option<RedisTransaction<R>>,
-    transaction: Transaction<'c, Postgres>,
-}
-
-impl<'c, R: RsmqConnection> From<(Option<R>, Transaction<'c, Postgres>)>
-    for QueueTransaction<'c, R>
-{
-    fn from(value: (Option<R>, Transaction<'c, Postgres>)) -> Self {
-        Self { rsmq: value.0.map(|e| e.into()), transaction: value.1 }
-    }
-}
-
-impl<'c, R: RsmqConnection> Debug for QueueTransaction<'c, R> {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("QueueTransaction")
-            .field("rsmq", &self.rsmq.as_ref().map(|_| ())) // do not require R: Debug
-            .field("transaction", &self.transaction)
-            .finish()
-    }
-}
-
-impl<'c, R: RsmqConnection> QueueTransaction<'c, R> {
-    pub async fn commit(self) -> Result<(), windmill_common::error::Error> {
-        self.transaction.commit().await?;
-        if let Some(rsmq) = self.rsmq {
-            rsmq.commit().await.map_err(|e| anyhow::anyhow!(e))?;
-        }
-
-        Ok(())
-    }
-
-    pub fn transaction_mut<'a>(&'a mut self) -> &'a mut Transaction<'c, Postgres> {
-        &mut self.transaction
-    }
-}
-
-impl<'c, 'b, R: RsmqConnection + Send> sqlx::Executor<'b> for &'b mut QueueTransaction<'c, R> {
-    type Database = Postgres;
-
-    fn fetch_many<'e, 'q: 'e, E: 'q>(
-        self,
-        query: E,
-    ) -> BoxStream<
-        'e,
-        Result<
-            sqlx::Either<
-                <Self::Database as Database>::QueryResult,
-                <Self::Database as Database>::Row,
-            >,
-            sqlx::Error,
-        >,
-    >
-    where
-        'b: 'e,
-        E: sqlx::Execute<'q, Self::Database>,
-    {
-        self.transaction.fetch_many(query)
-    }
-
-    fn fetch_optional<'e, 'q: 'e, E: 'q>(
-        self,
-        query: E,
-    ) -> BoxFuture<'e, Result<Option<<Self::Database as Database>::Row>, sqlx::Error>>
-    where
-        'b: 'e,
-        E: sqlx::Execute<'q, Self::Database>,
-    {
-        self.transaction.fetch_optional(query)
-    }
-
-    fn prepare_with<'e, 'q>(
-        self,
-        sql: &'q str,
-        parameters: &'e [<Self::Database as Database>::TypeInfo],
-    ) -> Pin<
-        Box<
-            dyn Future<Output = Result<<Self::Database as Database>::Statement<'q>, sqlx::Error>>
-                + Send
-                + 'e,
-        >,
-    >
-    where
-        'q: 'e,
-        'c: 'e,
-        'b: 'e,
-    {
-        self.transaction.prepare_with(sql, parameters)
-    }
-
-    fn describe<'e, 'q: 'e>(
-        self,
-        sql: &'q str,
-    ) -> BoxFuture<'e, Result<sqlx::Describe<Self::Database>, sqlx::Error>>
-    where
-        'b: 'e,
-    {
-        self.transaction.describe(sql)
-    }
-}
diff --git a/backend/windmill-queue/src/schedule.rs b/backend/windmill-queue/src/schedule.rs
index d95a0cc1e127a..23e6552f4e59b 100644
--- a/backend/windmill-queue/src/schedule.rs
+++ b/backend/windmill-queue/src/schedule.rs
@@ -8,7 +8,6 @@
 use crate::push;
 use crate::PushIsolationLevel;
-use crate::QueueTransaction;
 use anyhow::Context;
 use sqlx::{query_scalar, Postgres, Transaction};
 use std::collections::HashMap;
@@ -26,12 +25,12 @@ use windmill_common::{
     utils::{now_from_db, ScheduleType, StripPath},
 };

-pub async fn push_scheduled_job<'c, R: rsmq_async::RsmqConnection + Send + 'c>(
+pub async fn push_scheduled_job<'c>(
     db: &DB,
-    mut tx: QueueTransaction<'c, R>,
+    mut tx: Transaction<'c, Postgres>,
     schedule: &Schedule,
     authed: Option<&Authed>,
-) -> Result<QueueTransaction<'c, R>> {
+) -> Result<Transaction<'c, Postgres>> {
     if !*LICENSE_KEY_VALID.read().await {
         return Err(error::Error::BadRequest(
             "License key is not valid. Go to your superadmin settings to update your license key."
@@ -44,7 +43,7 @@ pub async fn push_scheduled_job<'c, R: rsmq_async::RsmqConnection + Send + 'c>(
     let tz = chrono_tz::Tz::from_str(&schedule.timezone)
         .map_err(|e| error::Error::BadRequest(e.to_string()))?;

-    let now = now_from_db(&mut tx).await?;
+    let now = now_from_db(&mut *tx).await?;

     let starting_from = match schedule.paused_until {
         Some(paused_until) if paused_until > now => paused_until.with_timezone(&tz),
@@ -55,7 +54,7 @@ pub async fn push_scheduled_job<'c, R: rsmq_async::RsmqConnection + Send + 'c>(
                 &schedule.workspace_id,
                 &schedule.path
             )
-            .execute(&mut tx)
+            .execute(&mut *tx)
             .await
             .context("Failed to clear paused_until for schedule")?;
         }
@@ -77,7 +76,7 @@ pub async fn push_scheduled_job<'c, R: rsmq_async::RsmqConnection + Send + 'c>(
         &schedule.path,
         next
     )
-    .fetch_one(&mut tx)
+    .fetch_one(&mut *tx)
     .await?
     .unwrap_or(false);

@@ -110,7 +109,7 @@ pub async fn push_scheduled_job<'c, R: rsmq_async::RsmqConnection + Send + 'c>(
         &schedule.script_path,
         &schedule.workspace_id,
     )
-    .fetch_optional(&mut tx)
+    .fetch_optional(&mut *tx)
     .await?;
     let (tag, dedicated_worker) = r
         .map(|x| (x.tag, x.dedicated_worker))
@@ -137,7 +136,7 @@ pub async fn push_scheduled_job<'c, R: rsmq_async::RsmqConnection + Send + 'c>(
         priority,
         timeout,
     ) = windmill_common::get_latest_hash_for_path(
-        tx.transaction_mut(),
+        &mut tx,
         &schedule.workspace_id,
         &schedule.script_path,
     )
@@ -205,7 +204,7 @@ pub async fn push_scheduled_job<'c, R: rsmq_async::RsmqConnection + Send + 'c>(
         &schedule.workspace_id,
         &schedule.path
     )
-    .execute(&mut tx)
+    .execute(&mut *tx)
     .await
     {
         tracing::error!(
diff --git a/backend/windmill-worker/Cargo.toml b/backend/windmill-worker/Cargo.toml
index e5b47b7a812fe..f0265accdf77d 100644
--- a/backend/windmill-worker/Cargo.toml
+++ b/backend/windmill-worker/Cargo.toml
@@ -57,7 +57,6 @@ mappable-rc.workspace = true
 git-version.workspace = true
 dyn-iter.workspace = true
 once_cell.workspace = true
-rsmq_async.workspace = true
 tokio-postgres.workspace = true
 bit-vec.workspace = true
 deno_fetch = { workspace = true, optional = true }
diff --git a/backend/windmill-worker/src/result_processor.rs b/backend/windmill-worker/src/result_processor.rs
index fa08fe377bd11..883953ecec166 100644
--- a/backend/windmill-worker/src/result_processor.rs
+++ b/backend/windmill-worker/src/result_processor.rs
@@ -45,7 +45,7 @@ use crate::{
     AuthedClient, JobCompleted, JobCompletedSender, SameWorkerSender, SendResult, INIT_SCRIPT_TAG,
 };

-pub fn start_background_processor<R>(
+pub fn start_background_processor(
     mut job_completed_rx: Receiver<SendResult>,
     job_completed_sender: Sender<SendResult>,
     same_worker_queue_size: Arc<AtomicU16>,
@@ -54,14 +54,10 @@ pub fn start_background_processor(
     db: DB,
     worker_dir: String,
     same_worker_tx: SameWorkerSender,
-    rsmq: Option<R>,
     worker_name: String,
     killpill_tx: sync::broadcast::Sender<()>,
     is_dedicated_worker: bool,
-) -> JoinHandle<()>
-where
-    R: rsmq_async::RsmqConnection + Send + Sync + Clone + 'static,
-{
+) -> JoinHandle<()> {
     tokio::spawn(async move {
         let mut has_been_killed = false;
@@ -81,8 +77,6 @@ where
             match sr {
                 SendResult::JobCompleted(jc) => {
-                    let rsmq = rsmq.clone();
-
                     let is_init_script_and_failure =
                         !jc.success && jc.job.tag.as_str() == INIT_SCRIPT_TAG;
                     let is_dependency_job = matches!(
@@ -96,7 +90,6 @@ where
                         &db,
                         &worker_dir,
                         &same_worker_tx,
-                        rsmq,
                         &worker_name,
                         job_completed_sender.clone(),
                         #[cfg(feature = "benchmark")]
@@ -155,7 +148,6 @@ where
                             same_worker_tx.clone(),
                             &worker_dir,
                             stop_early_override,
-                            rsmq.clone(),
                             &worker_name,
                             job_completed_sender.clone(),
"benchmark")] @@ -315,15 +307,12 @@ pub async fn process_result( } } -pub async fn handle_receive_completed_job< - R: rsmq_async::RsmqConnection + Send + Sync + Clone + 'static, ->( +pub async fn handle_receive_completed_job( jc: JobCompleted, base_internal_url: &str, db: &DB, worker_dir: &str, same_worker_tx: &SameWorkerSender, - rsmq: Option, worker_name: &str, job_completed_tx: Sender, #[cfg(feature = "benchmark")] bench: &mut BenchmarkIter, @@ -345,7 +334,6 @@ pub async fn handle_receive_completed_job< db, &worker_dir, same_worker_tx.clone(), - rsmq.clone(), worker_name, job_completed_tx.clone(), #[cfg(feature = "benchmark")] @@ -363,7 +351,6 @@ pub async fn handle_receive_completed_job< false, same_worker_tx.clone(), &worker_dir, - rsmq.clone(), worker_name, job_completed_tx, #[cfg(feature = "benchmark")] @@ -374,13 +361,12 @@ pub async fn handle_receive_completed_job< } #[tracing::instrument(name = "completed_job", level = "info", skip_all, fields(job_id = %job.id))] -pub async fn process_completed_job( +pub async fn process_completed_job( JobCompleted { job, result, mem_peak, success, cached_res_path, canceled_by, .. }: JobCompleted, client: &AuthedClient, db: &DB, worker_dir: &str, same_worker_tx: SameWorkerSender, - rsmq: Option, worker_name: &str, job_completed_tx: Sender, #[cfg(feature = "benchmark")] bench: &mut BenchmarkIter, @@ -404,7 +390,6 @@ pub async fn process_completed_job( +pub async fn handle_job_error( db: &Pool, client: &AuthedClient, job: &QueuedJob, @@ -494,7 +476,6 @@ pub async fn handle_job_error, worker_name: &str, job_completed_tx: Sender, #[cfg(feature = "benchmark")] bench: &mut BenchmarkIter, @@ -504,7 +485,6 @@ pub async fn handle_job_error json!({"message": err.to_string(), "name": "InternalErr"}), }; - let rsmq_2 = rsmq.clone(); let update_job_future = || async { append_logs( &job.id, @@ -519,7 +499,6 @@ pub async fn handle_job_error( +pub async fn run_worker( db: &Pool, hostname: &str, worker_name: String, @@ -718,7 +718,6 @@ pub async fn run_worker, killpill_tx: tokio::sync::broadcast::Sender<()>, base_internal_url: &str, - rsmq: Option, agent_mode: bool, ) { #[cfg(not(feature = "enterprise"))] @@ -987,7 +986,6 @@ pub async fn run_worker( +async fn queue_init_bash_maybe<'c>( db: &Pool, same_worker_tx: SameWorkerSender, worker_name: &str, - rsmq: Option, ) -> error::Result { if let Some(content) = WORKER_CONFIG.read().await.init_bash.clone() { - let tx = PushIsolationLevel::IsolatedRoot(db.clone(), rsmq); + let tx = PushIsolationLevel::IsolatedRoot(db.clone()); let ehm = HashMap::new(); let (uuid, inner_tx) = push( &db, @@ -1756,7 +1749,7 @@ pub struct PreviousResult<'a> { } #[tracing::instrument(name = "job", level = "info", skip_all, fields(job_id = %job.id))] -async fn handle_queued_job( +async fn handle_queued_job( job: Arc, raw_code: Option, raw_lock: Option, @@ -1769,7 +1762,6 @@ async fn handle_queued_job( job_dir: &str, same_worker_tx: SameWorkerSender, base_internal_url: &str, - rsmq: Option, job_completed_tx: JobCompletedSender, occupancy_metrics: &mut OccupancyMetrics, #[cfg(feature = "benchmark")] bench: &mut BenchmarkIter, @@ -1840,7 +1832,8 @@ async fn handle_queued_job( (None, None, None) => sqlx::query!( "SELECT raw_code, raw_lock, raw_flow AS \"raw_flow: Json>\" FROM job WHERE id = $1 AND workspace_id = $2 LIMIT 1", - &job.id, job.workspace_id + &job.id, + job.workspace_id ) .fetch_one(db) .await @@ -1948,7 +1941,6 @@ async fn handle_queued_job( None, same_worker_tx, worker_dir, - rsmq, job_completed_tx.0.clone(), ) .await?; @@ -2005,7 
@@ -2005,7 +1997,6 @@ async fn handle_queued_job(
                     worker_dir,
                     base_internal_url,
                     &client.get_token().await,
-                    rsmq.clone(),
                     occupancy_metrics,
                 )
                 .await
@@ -2022,7 +2013,6 @@ async fn handle_queued_job(
                     worker_dir,
                     base_internal_url,
                     &client.get_token().await,
-                    rsmq.clone(),
                     occupancy_metrics,
                 )
                 .await
@@ -2037,7 +2027,6 @@ async fn handle_queued_job(
                     worker_dir,
                     base_internal_url,
                     &client.get_token().await,
-                    rsmq.clone(),
                     occupancy_metrics,
                 )
                 .await
@@ -2246,8 +2235,7 @@ async fn handle_code_execution_job(
     };

     ContentReqLangEnvs {
-        content: raw_code
-            .unwrap_or_else(|| "no raw code".to_owned()),
+        content: raw_code.unwrap_or_else(|| "no raw code".to_owned()),
         lockfile: raw_lock,
         language: job.language.to_owned(),
         envs: None,
diff --git a/backend/windmill-worker/src/worker_flow.rs b/backend/windmill-worker/src/worker_flow.rs
index 3d041c60f5bfc..bb9c82f39d9a5 100644
--- a/backend/windmill-worker/src/worker_flow.rs
+++ b/backend/windmill-worker/src/worker_flow.rs
@@ -24,7 +24,7 @@ use serde::{Deserialize, Serialize};
 use serde_json::value::RawValue;
 use serde_json::{json, Value};
 use sqlx::types::Json;
-use sqlx::FromRow;
+use sqlx::{FromRow, Postgres, Transaction};
 use tokio::sync::mpsc::Sender;
 use tracing::instrument;
 use uuid::Uuid;
@@ -58,12 +58,10 @@ use windmill_queue::{

 type DB = sqlx::Pool<Postgres>;

-use windmill_queue::{canceled_job_to_result, push, QueueTransaction};
+use windmill_queue::{canceled_job_to_result, push};

 // #[instrument(level = "trace", skip_all)]
-pub async fn update_flow_status_after_job_completion<
-    R: rsmq_async::RsmqConnection + Send + Sync + Clone,
->(
+pub async fn update_flow_status_after_job_completion(
     db: &DB,
     client: &AuthedClient,
     flow: uuid::Uuid,
@@ -75,7 +73,6 @@ pub async fn update_flow_status_after_job_completion<
     same_worker_tx: SameWorkerSender,
     worker_dir: &str,
     stop_early_override: Option<bool>,
-    rsmq: Option<R>,
     worker_name: &str,
     job_completed_tx: Sender<SendResult>,
     #[cfg(feature = "benchmark")] bench: &mut BenchmarkIter,
@@ -97,7 +94,6 @@ pub async fn update_flow_status_after_job_completion<
         worker_dir,
         stop_early_override,
         false,
-        rsmq.clone(),
         worker_name,
         job_completed_tx.clone(),
         #[cfg(feature = "benchmark")]
@@ -119,7 +115,6 @@ pub async fn update_flow_status_after_job_completion<
             worker_dir,
             nrec.stop_early_override,
             nrec.skip_error_handler,
-            rsmq.clone(),
             worker_name,
             job_completed_tx.clone(),
             #[cfg(feature = "benchmark")]
@@ -145,7 +140,6 @@ pub async fn update_flow_status_after_job_completion<
             worker_dir,
             nrec.stop_early_override,
             nrec.skip_error_handler,
-            rsmq.clone(),
             worker_name,
             job_completed_tx.clone(),
             #[cfg(feature = "benchmark")]
@@ -177,9 +171,7 @@ struct RecoveryObject {
 }

 // #[instrument(level = "trace", skip_all)]
-pub async fn update_flow_status_after_job_completion_internal<
-    R: rsmq_async::RsmqConnection + Send + Sync + Clone,
->(
+pub async fn update_flow_status_after_job_completion_internal(
     db: &DB,
     client: &AuthedClient,
     flow: uuid::Uuid,
@@ -192,7 +184,6 @@ pub async fn update_flow_status_after_job_completion_internal<
     worker_dir: &str,
     stop_early_override: Option<bool>,
     skip_error_handler: bool,
-    rsmq: Option<R>,
     worker_name: &str,
     job_completed_tx: Sender<SendResult>,
     #[cfg(feature = "benchmark")] bench: &mut BenchmarkIter,
@@ -407,7 +398,7 @@ pub async fn update_flow_status_after_job_completion_internal<
             })?;
         }

-        let mut tx: QueueTransaction<'_, _> = (rsmq.clone(), db.begin().await?).into();
+        let mut tx = db.begin().await?;

         add_time!(bench, "process module status START");

@@ -454,7 +445,7 @@ pub async fn update_flow_status_after_job_completion_internal<
                     flow
                 )
             }
-            .fetch_one(&mut tx)
+            .fetch_one(&mut *tx)
             .await.map_err(|e| {
                 Error::InternalErr(format!(
                     "error while fetching iterator index: {e:#}"
@@ -498,7 +489,7 @@ pub async fn update_flow_status_after_job_completion_internal<
                     old_status.step,
                     flow
                 )}
-                .fetch_one(&mut tx)
+                .fetch_one(&mut *tx)
                 .await
                 .map_err(|e| {
                     Error::InternalErr(format!(
@@ -528,7 +519,7 @@ pub async fn update_flow_status_after_job_completion_internal<
                 "SELECT success FROM completed_job WHERE id = ANY($1)",
                 jobs.as_slice()
             )
-            .fetch_all(&mut tx)
+            .fetch_all(&mut *tx)
             .await
             .map_err(|e| {
                 Error::InternalErr(format!(
@@ -748,7 +739,7 @@ pub async fn update_flow_status_after_job_completion_internal<
         json!(old_status.step + 1),
         flow
     )
-    .execute(&mut tx)
+    .execute(&mut *tx)
     .await
     .map_err(|e| {
         Error::InternalErr(format!("error while setting flow index for {flow}: {e:#}"))
@@ -769,7 +760,7 @@ pub async fn update_flow_status_after_job_completion_internal<
         "SELECT flow_status->'failure_module'->>'parent_module' FROM queue WHERE id = $1",
         flow
     )
-    .fetch_one(&mut tx)
+    .fetch_one(&mut *tx)
     .await.map_err(|e| {
         Error::InternalErr(format!(
             "error while fetching failure module: {e:#}"
@@ -786,7 +777,7 @@ pub async fn update_flow_status_after_job_completion_internal<
         }),
         flow
     )
-    .execute(&mut tx)
+    .execute(&mut *tx)
     .await
     .map_err(|e| {
         Error::InternalErr(format!(
@@ -801,7 +792,7 @@ pub async fn update_flow_status_after_job_completion_internal<
         json!(new_status),
         flow
     )
-    .execute(&mut tx)
+    .execute(&mut *tx)
     .await
     .map_err(|e| {
         Error::InternalErr(format!(
@@ -817,7 +808,7 @@ pub async fn update_flow_status_after_job_completion_internal<
         json!(new_status),
         flow
     )
-    .execute(&mut tx)
+    .execute(&mut *tx)
     .await
     .map_err(|e| {
         Error::InternalErr(format!("error while setting new flow status: {e:#}"))
@@ -832,7 +823,7 @@ pub async fn update_flow_status_after_job_completion_internal<
         json!(job_result),
         flow
     )
-    .execute(&mut tx)
+    .execute(&mut *tx)
     .await.map_err(|e| {
         Error::InternalErr(format!(
             "error while setting leaf jobs: {e:#}"
@@ -908,7 +899,7 @@ pub async fn update_flow_status_after_job_completion_internal<
             RETURNING flow_status",
         )
         .bind(flow)
-        .execute(&mut tx)
+        .execute(&mut *tx)
         .await
         .context("remove flow status retry")?;
     }
@@ -916,7 +907,7 @@ pub async fn update_flow_status_after_job_completion_internal<
     let flow_job = sqlx::query_as::<_, PulledJob>("SELECT * FROM queue WHERE id = $1 AND workspace_id = $2")
         .bind(flow)
         .bind(w_id)
-        .fetch_optional(&mut tx)
+        .fetch_optional(&mut *tx)
         .await
         .map_err(Into::<Error>::into)?
         .ok_or_else(|| Error::InternalErr(format!("requiring flow to be in the queue")))?;
@@ -1040,8 +1031,7 @@ pub async fn update_flow_status_after_job_completion_internal<
                     reason: flow_job.canceled_reason.clone(),
                 }),
                 canceled_job_to_result(&flow_job),
-                rsmq.clone(),
-                worker_name,
+                worker_name,
                 true,
                 #[cfg(feature = "benchmark")]
                 bench,
@@ -1084,8 +1074,7 @@ pub async fn update_flow_status_after_job_completion_internal<
                     Json(&nresult),
                     0,
                     None,
-                    rsmq.clone(),
-                    true,
+                    true,
                     #[cfg(feature = "benchmark")]
                     bench,
                 )
@@ -1103,8 +1092,7 @@ pub async fn update_flow_status_after_job_completion_internal<
                     ),
                     0,
                     None,
-                    rsmq.clone(),
-                    true,
+                    true,
                     #[cfg(feature = "benchmark")]
                     bench,
                 )
@@ -1122,8 +1110,7 @@ pub async fn update_flow_status_after_job_completion_internal<
                 Some(nresult.clone()),
                 same_worker_tx.clone(),
                 worker_dir,
-                rsmq.clone(),
-                job_completed_tx,
+                job_completed_tx,
             )
             .await
             {
@@ -1142,8 +1129,7 @@ pub async fn update_flow_status_after_job_completion_internal<
                     0,
                     None,
                     e,
-                    rsmq.clone(),
-                    worker_name,
+                    worker_name,
                     true,
                     #[cfg(feature = "benchmark")]
                     bench,
@@ -1188,14 +1174,14 @@ fn find_flow_job_index(flow_jobs: &Vec<Uuid>, job_id_for_status: &Uuid) -> Option<usize> {
     flow_jobs.iter().position(|x| x == job_id_for_status)
 }

-async fn set_success_in_flow_job_success<'c, R: rsmq_async::RsmqConnection + Send>(
+async fn set_success_in_flow_job_success<'c>(
     flow_jobs_success: &Option<Vec<Option<bool>>>,
     flow_jobs: &Vec<Uuid>,
     job_id_for_status: &Uuid,
     old_status: &FlowStatus,
     flow: Uuid,
     success: bool,
-    tx: &mut QueueTransaction<'c, R>,
+    tx: &mut Transaction<'c, Postgres>,
 ) -> error::Result<()> {
     if flow_jobs_success.is_some() {
         let position = find_flow_job_index(flow_jobs, job_id_for_status);
@@ -1207,7 +1193,7 @@ async fn set_success_in_flow_job_success<'c, R: rsmq_async::RsmqConnection + Sen
                 position as i32,
                 json!(success)
             )
-            .execute(tx)
+            .execute(&mut **tx)
             .await.map_err(|e| {
                 Error::InternalErr(format!(
                     "error while setting flow_jobs_success: {e:#}"
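The `.execute(tx)` → `.execute(&mut **tx)` change just above is the borrowed-transaction variant of the same Executor rule: when a helper receives `tx: &mut Transaction<'c, Postgres>`, one deref strips the reference and the second derefs the `Transaction` to its `PgConnection`. A minimal sketch — the function and query are illustrative only:

use sqlx::{Postgres, Transaction};

// With `tx: &mut Transaction<...>`, `&mut **tx` yields the `&mut PgConnection`
// that implements Executor; `&mut *tx` would only reborrow `&mut Transaction`.
async fn set_suspend(tx: &mut Transaction<'_, Postgres>, v: bool) -> Result<(), sqlx::Error> {
    sqlx::query("UPDATE queue SET suspend = $1")
        .bind(v)
        .execute(&mut **tx)
        .await?;
    Ok(())
}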
@@ -1501,7 +1487,7 @@ async fn transform_input(
 }

 #[instrument(level = "trace", skip_all)]
-pub async fn handle_flow<R: rsmq_async::RsmqConnection + Send + Sync + Clone + 'static>(
+pub async fn handle_flow(
     flow_job: Arc<QueuedJob>,
     flow_value: Option<FlowValue>,
     db: &sqlx::Pool<Postgres>,
@@ -1509,7 +1495,6 @@ pub async fn handle_flow(
     last_result: Option<Arc<Box<RawValue>>>,
     same_worker_tx: SameWorkerSender,
     worker_dir: &str,
-    rsmq: Option<R>,
     job_completed_tx: Sender<SendResult>,
 ) -> anyhow::Result<()> {
     let flow = flow_value
@@ -1524,19 +1509,18 @@ pub async fn handle_flow(
         && flow_job.script_path.is_some()
         && status.step == 0
     {
-        let mut tx: QueueTransaction<'_, R> = (rsmq.clone(), db.begin().await?).into();
+        let mut tx = db.begin().await?;

         let schedule_path = flow_job.schedule_path.as_ref().unwrap();

         let schedule =
-            get_schedule_opt(tx.transaction_mut(), &flow_job.workspace_id, schedule_path).await?;
+            get_schedule_opt(&mut tx, &flow_job.workspace_id, schedule_path).await?;

         tx.commit().await?;

         if let Some(schedule) = schedule {
             if let Err(err) = handle_maybe_scheduled_job(
-                rsmq.clone(),
-                db,
+                db,
                 &flow_job,
                 &schedule,
                 flow_job.script_path.as_ref().unwrap(),
@@ -1567,7 +1551,6 @@ pub async fn handle_flow(
         last_result,
         same_worker_tx,
         worker_dir,
-        rsmq,
         job_completed_tx,
     )
     .await?;
@@ -1620,7 +1603,7 @@ lazy_static::lazy_static! {
 }

 // #[async_recursion]
 // #[instrument(level = "trace", skip_all)]
-async fn push_next_flow_job<R: rsmq_async::RsmqConnection + Send + Sync + Clone + 'static>(
+async fn push_next_flow_job(
     flow_job: Arc<QueuedJob>,
     mut status: FlowStatus,
     flow: FlowValue,
@@ -1629,7 +1612,6 @@ async fn push_next_flow_job(
     last_job_result: Option<Arc<Box<RawValue>>>,
     same_worker_tx: SameWorkerSender,
     worker_dir: &str,
-    rsmq: Option<R>,
     job_completed_tx: Sender<SendResult>,
 ) -> error::Result<()> {
     let job_root = flow_job
@@ -2367,7 +2349,7 @@ async fn push_next_flow_job(
             ContinuePayload::ForloopJobs { n, .. } => *n,
         };

-        let mut tx: QueueTransaction<'_, R> = (rsmq.clone(), db.begin().await?).into();
+        let mut tx = db.begin().await?;

         let nargs = args.as_ref();
         for i in (0..len).into_iter() {
             if i % 100 == 0 && i != 0 {
@@ -2559,7 +2541,7 @@ async fn push_next_flow_job(
                     root_job,
                     flow_job.workspace_id,
                 )
-                .fetch_optional(&mut tx)
+                .fetch_optional(&mut *tx)
                 .await?
                 .map(|x| x.into())
             } else {
@@ -2616,7 +2598,7 @@ async fn push_next_flow_job(
                         (i as u16 - p + 1) as i32,
                         uuid,
                     )
-                    .execute(&mut inner_tx)
+                    .execute(&mut *inner_tx)
                     .await?;
                 }
                 tracing::debug!(id = %flow_job.id, root_id = %job_root, "updated suspend for {uuid}");
@@ -2635,7 +2617,7 @@ async fn push_next_flow_job(
             )
             .bind(uuid_singleton_json)
             .bind(root_job.unwrap_or(flow_job.id))
-            .execute(&mut inner_tx)
+            .execute(&mut *inner_tx)
             .await?;
         }

@@ -2659,7 +2641,7 @@ async fn push_next_flow_job(
                 flow_job.id,
                 uuid
             )
-            .execute(&mut tx)
+            .execute(&mut *tx)
             .await?;
             tracing::debug!(id = %flow_job.id, root_id = %job_root, "updated parallel monitor lock for {uuid}");
         }
@@ -2769,7 +2751,7 @@ async fn push_next_flow_job(
                 json!(flow.modules.len()),
                 flow_job.id
             )
-            .execute(&mut tx)
+            .execute(&mut *tx)
             .await?;
         }
         Step::PreprocessorStep => {
@@ -2782,7 +2764,7 @@ async fn push_next_flow_job(
                 json!(-1),
                 flow_job.id
             )
-            .execute(&mut tx)
+            .execute(&mut *tx)
             .await?;
         }
         Step::Step(i) => {
@@ -2796,7 +2778,7 @@ async fn push_next_flow_job(
                 json!(i),
                 flow_job.id
             )
-            .execute(&mut tx)
+            .execute(&mut *tx)
             .await?;
         }
     };
@@ -2809,7 +2791,7 @@ async fn push_next_flow_job(
         WHERE id = $1",
         flow_job.id
     )
-    .execute(&mut tx)
+    .execute(&mut *tx)
     .await?;

     tx.commit().await?;
@@ -2861,7 +2843,7 @@ async fn push_next_flow_job(
 // .bind(json!(status_module))
 // .bind(json!(next_step.unwrap_or(i)))
 // .bind(job_id)
-// .fetch_one(&mut tx)
+// .fetch_one(&mut *tx)
 // .await?;

 // tx.commit().await?;
diff --git a/backend/windmill-worker/src/worker_lockfiles.rs b/backend/windmill-worker/src/worker_lockfiles.rs
index 46d2954aae3b3..0de308337ab72 100644
--- a/backend/windmill-worker/src/worker_lockfiles.rs
+++ b/backend/windmill-worker/src/worker_lockfiles.rs
@@ -202,7 +202,7 @@ pub fn extract_relative_imports(
         }
     }
 }
 #[tracing::instrument(level = "trace", skip_all)]
-pub async fn handle_dependency_job<R: rsmq_async::RsmqConnection + Send + Sync + Clone + 'static>(
+pub async fn handle_dependency_job(
     job: &QueuedJob,
     raw_code: Option<String>,
     mem_peak: &mut i32,
@@ -213,7 +213,6 @@ pub async fn handle_dependency_job<
     base_internal_url: &str,
     token: &str,
-    rsmq: Option<R>,
     occupancy_metrics: &mut OccupancyMetrics,
 ) -> error::Result<Box<RawValue>> {
     let raw_code = match raw_code {
@@ -314,7 +313,6 @@ pub async fn handle_dependency_job(
-async fn trigger_dependents_to_recompute_dependencies<
-    R: rsmq_async::RsmqConnection + Send + Sync + Clone + 'static,
->(
+async fn trigger_dependents_to_recompute_dependencies(
     w_id: &str,
     script_path: &str,
     deployment_message: Option<String>,
@@ -399,7 +394,6 @@ async fn trigger_dependents_to_recompute_dependencies<
     created_by: &str,
     permissioned_as: &str,
     db: &sqlx::Pool<Postgres>,
-    rsmq: Option<R>,
     mut already_visited: Vec<String>,
 ) -> error::Result<()> {
     let script_importers = sqlx::query!(
@@ -418,8 +412,7 @@ async fn trigger_dependents_to_recompute_dependencies<
         if already_visited.contains(&s.importer_path) {
             continue;
         }
-        let tx: PushIsolationLevel<'_, R> =
-            PushIsolationLevel::IsolatedRoot(db.clone(), rsmq.clone());
+        let tx = PushIsolationLevel::IsolatedRoot(db.clone());

         let mut args: HashMap<String, Box<RawValue>> = HashMap::new();
         if let Some(ref dm) = deployment_message {
             args.insert("deployment_message".to_string(), to_raw_value(&dm));
         }
@@ -526,7 +519,7 @@ async fn trigger_dependents_to_recompute_dependencies<
     Ok(())
 }

-pub async fn handle_flow_dependency_job<
-    R: rsmq_async::RsmqConnection + Send + Sync + Clone + 'static,
->(
+pub async fn handle_flow_dependency_job(
     job: &QueuedJob,
     raw_flow: Option<Json<Box<RawValue>>>,
     mem_peak: &mut i32,
@@ -537,7 +530,6 @@ pub async fn handle_flow_dependency_job<
     base_internal_url: &str,
     token: &str,
-    rsmq: Option<R>,
     occupancy_metrics: &mut OccupancyMetrics,
 ) -> error::Result<Box<RawValue>> {
     let job_path = job.script_path.clone().ok_or_else(|| {
@@ -661,7 +653,6 @@ pub async fn handle_flow_dependency_job(
-pub async fn handle_app_dependency_job<
-    R: rsmq_async::RsmqConnection + Send + Sync + Clone + 'static,
->(
+pub async fn handle_app_dependency_job(
     job: &QueuedJob,
     mem_peak: &mut i32,
     canceled_by: &mut Option<CanceledBy>,
@@ -1178,7 +1169,6 @@ pub async fn handle_app_dependency_job<
     base_internal_url: &str,
     token: &str,
-    rsmq: Option<R>,
     occupancy_metrics: &mut OccupancyMetrics,
 ) -> error::Result<()> {
     let job_path = job.script_path.clone().ok_or_else(|| {
@@ -1240,7 +1230,6 @@ pub async fn handle_app_dependency_job
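Taken together, the call shape after this change is: build a `PushIsolationLevel`, let `push` resolve it into a plain `Transaction<'c, Postgres>`, and commit once — there is no longer a second, Redis-side commit phase to sequence. A hedged end-to-end sketch with a stand-in for `push` (the real function takes many more parameters than shown here):

use sqlx::{Pool, Postgres, Transaction};
use uuid::Uuid;

// Stand-in for push(): receives and returns a plain sqlx transaction.
async fn push_like<'c>(
    mut tx: Transaction<'c, Postgres>,
) -> Result<(Uuid, Transaction<'c, Postgres>), sqlx::Error> {
    let id = Uuid::new_v4(); // assumes uuid's "v4" and sqlx's "uuid" features
    sqlx::query("INSERT INTO queue (id) VALUES ($1)")
        .bind(id)
        .execute(&mut *tx)
        .await?;
    Ok((id, tx))
}

async fn caller(db: &Pool<Postgres>) -> Result<(), sqlx::Error> {
    let tx = db.begin().await?; // PushIsolationLevel::Transaction(tx) in the real API
    let (_uuid, tx) = push_like(tx).await?;
    tx.commit().await // single commit point, matching `Ok((uuid, tx))` above
}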