diff --git a/poetry.lock b/poetry.lock index 7919bd5a..c1e3386f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. [[package]] name = "anyio" @@ -520,38 +520,38 @@ toml = ["tomli"] [[package]] name = "cryptography" -version = "43.0.1" +version = "43.0.3" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a"}, - {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042"}, - {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494"}, - {file = "cryptography-43.0.1-cp37-abi3-win32.whl", hash = "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2"}, - {file = "cryptography-43.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d"}, - {file = "cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1"}, - {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa"}, - {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4"}, - {file = "cryptography-43.0.1-cp39-abi3-win32.whl", hash = "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47"}, - {file = "cryptography-43.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034"}, - {file = 
"cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2"}, - {file = "cryptography-43.0.1.tar.gz", hash = "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d"}, + {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"}, + {file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"}, + {file = "cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"}, + {file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"}, + 
{file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"}, + {file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"}, + {file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"}, + {file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"}, ] [package.dependencies] @@ -564,7 +564,7 @@ nox = ["nox"] pep8test = ["check-sdist", "click", "mypy", "ruff"] sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] [[package]] @@ -1154,6 +1154,7 @@ files = [ ] [package.dependencies] +ops-scenario = {version = ">=7.0.5,<8", optional = true, markers = "extra == \"testing\""} PyYAML = "==6.*" websocket-client = "==1.*" @@ -1331,22 +1332,22 @@ wcwidth = "*" [[package]] name = "protobuf" -version = "5.28.2" +version = "5.28.3" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-5.28.2-cp310-abi3-win32.whl", hash = "sha256:eeea10f3dc0ac7e6b4933d32db20662902b4ab81bf28df12218aa389e9c2102d"}, - {file = "protobuf-5.28.2-cp310-abi3-win_amd64.whl", hash = "sha256:2c69461a7fcc8e24be697624c09a839976d82ae75062b11a0972e41fd2cd9132"}, - {file = "protobuf-5.28.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a8b9403fc70764b08d2f593ce44f1d2920c5077bf7d311fefec999f8c40f78b7"}, - {file = "protobuf-5.28.2-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:35cfcb15f213449af7ff6198d6eb5f739c37d7e4f1c09b5d0641babf2cc0c68f"}, - {file = "protobuf-5.28.2-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:5e8a95246d581eef20471b5d5ba010d55f66740942b95ba9b872d918c459452f"}, - {file = "protobuf-5.28.2-cp38-cp38-win32.whl", hash = "sha256:87317e9bcda04a32f2ee82089a204d3a2f0d3c8aeed16568c7daf4756e4f1fe0"}, - {file = "protobuf-5.28.2-cp38-cp38-win_amd64.whl", hash 
= "sha256:c0ea0123dac3399a2eeb1a1443d82b7afc9ff40241433296769f7da42d142ec3"}, - {file = "protobuf-5.28.2-cp39-cp39-win32.whl", hash = "sha256:ca53faf29896c526863366a52a8f4d88e69cd04ec9571ed6082fa117fac3ab36"}, - {file = "protobuf-5.28.2-cp39-cp39-win_amd64.whl", hash = "sha256:8ddc60bf374785fb7cb12510b267f59067fa10087325b8e1855b898a0d81d276"}, - {file = "protobuf-5.28.2-py3-none-any.whl", hash = "sha256:52235802093bd8a2811abbe8bf0ab9c5f54cca0a751fdd3f6ac2a21438bffece"}, - {file = "protobuf-5.28.2.tar.gz", hash = "sha256:59379674ff119717404f7454647913787034f03fe7049cbef1d74a97bb4593f0"}, + {file = "protobuf-5.28.3-cp310-abi3-win32.whl", hash = "sha256:0c4eec6f987338617072592b97943fdbe30d019c56126493111cf24344c1cc24"}, + {file = "protobuf-5.28.3-cp310-abi3-win_amd64.whl", hash = "sha256:91fba8f445723fcf400fdbe9ca796b19d3b1242cd873907979b9ed71e4afe868"}, + {file = "protobuf-5.28.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a3f6857551e53ce35e60b403b8a27b0295f7d6eb63d10484f12bc6879c715687"}, + {file = "protobuf-5.28.3-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:3fa2de6b8b29d12c61911505d893afe7320ce7ccba4df913e2971461fa36d584"}, + {file = "protobuf-5.28.3-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:712319fbdddb46f21abb66cd33cb9e491a5763b2febd8f228251add221981135"}, + {file = "protobuf-5.28.3-cp38-cp38-win32.whl", hash = "sha256:3e6101d095dfd119513cde7259aa703d16c6bbdfae2554dfe5cfdbe94e32d548"}, + {file = "protobuf-5.28.3-cp38-cp38-win_amd64.whl", hash = "sha256:27b246b3723692bf1068d5734ddaf2fccc2cdd6e0c9b47fe099244d80200593b"}, + {file = "protobuf-5.28.3-cp39-cp39-win32.whl", hash = "sha256:135658402f71bbd49500322c0f736145731b16fc79dc8f367ab544a17eab4535"}, + {file = "protobuf-5.28.3-cp39-cp39-win_amd64.whl", hash = "sha256:70585a70fc2dd4818c51287ceef5bdba6387f88a578c86d47bb34669b5552c36"}, + {file = "protobuf-5.28.3-py3-none-any.whl", hash = "sha256:cee1757663fa32a1ee673434fcf3bf24dd54763c79690201208bafec62f19eed"}, + {file = "protobuf-5.28.3.tar.gz", hash = "sha256:64badbc49180a5e401f373f9ce7ab1d18b63f7dd4a9cdc43c92b9f0b481cef7b"}, ] [[package]] @@ -1395,6 +1396,7 @@ description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs optional = false python-versions = ">=3.8" files = [ + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, ] @@ -1405,6 +1407,7 @@ description = "A collection of ASN.1-based protocols modules" optional = false python-versions = ">=3.8" files = [ + {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, ] @@ -1552,13 +1555,13 @@ pytz = "*" [[package]] name = "pyright" -version = "1.1.384" +version = "1.1.387" description = "Command line wrapper for pyright" optional = false python-versions = ">=3.7" files = [ - {file = "pyright-1.1.384-py3-none-any.whl", hash = "sha256:f0b6f4db2da38f27aeb7035c26192f034587875f751b847e9ad42ed0c704ac9e"}, - {file = "pyright-1.1.384.tar.gz", hash = "sha256:25e54d61f55cbb45f1195ff89c488832d7a45d59f3e132f178fdf9ef6cafc706"}, + {file = "pyright-1.1.387-py3-none-any.whl", hash = "sha256:6a1f495a261a72e12ad17e20d1ae3df4511223c773b19407cfa006229b1b08a5"}, + {file = "pyright-1.1.387.tar.gz", hash = 
"sha256:577de60224f7fe36505d5b181231e3a395d427b7873be0bbcaa962a29ea93a60"}, ] [package.dependencies] @@ -1930,29 +1933,29 @@ pyasn1 = ">=0.1.3" [[package]] name = "ruff" -version = "0.6.9" +version = "0.7.1" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.9-py3-none-linux_armv6l.whl", hash = "sha256:064df58d84ccc0ac0fcd63bc3090b251d90e2a372558c0f057c3f75ed73e1ccd"}, - {file = "ruff-0.6.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:140d4b5c9f5fc7a7b074908a78ab8d384dd7f6510402267bc76c37195c02a7ec"}, - {file = "ruff-0.6.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53fd8ca5e82bdee8da7f506d7b03a261f24cd43d090ea9db9a1dc59d9313914c"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645d7d8761f915e48a00d4ecc3686969761df69fb561dd914a773c1a8266e14e"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eae02b700763e3847595b9d2891488989cac00214da7f845f4bcf2989007d577"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d5ccc9e58112441de8ad4b29dcb7a86dc25c5f770e3c06a9d57e0e5eba48829"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:417b81aa1c9b60b2f8edc463c58363075412866ae4e2b9ab0f690dc1e87ac1b5"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c866b631f5fbce896a74a6e4383407ba7507b815ccc52bcedabb6810fdb3ef7"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b118afbb3202f5911486ad52da86d1d52305b59e7ef2031cea3425142b97d6f"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67267654edc23c97335586774790cde402fb6bbdb3c2314f1fc087dee320bfa"}, - {file = "ruff-0.6.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ef0cc774b00fec123f635ce5c547dac263f6ee9fb9cc83437c5904183b55ceb"}, - {file = "ruff-0.6.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:12edd2af0c60fa61ff31cefb90aef4288ac4d372b4962c2864aeea3a1a2460c0"}, - {file = "ruff-0.6.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:55bb01caeaf3a60b2b2bba07308a02fca6ab56233302406ed5245180a05c5625"}, - {file = "ruff-0.6.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:925d26471fa24b0ce5a6cdfab1bb526fb4159952385f386bdcc643813d472039"}, - {file = "ruff-0.6.9-py3-none-win32.whl", hash = "sha256:eb61ec9bdb2506cffd492e05ac40e5bc6284873aceb605503d8494180d6fc84d"}, - {file = "ruff-0.6.9-py3-none-win_amd64.whl", hash = "sha256:785d31851c1ae91f45b3d8fe23b8ae4b5170089021fbb42402d811135f0b7117"}, - {file = "ruff-0.6.9-py3-none-win_arm64.whl", hash = "sha256:a9641e31476d601f83cd602608739a0840e348bda93fec9f1ee816f8b6798b93"}, - {file = "ruff-0.6.9.tar.gz", hash = "sha256:b076ef717a8e5bc819514ee1d602bbdca5b4420ae13a9cf61a0c0a4f53a2baa2"}, + {file = "ruff-0.7.1-py3-none-linux_armv6l.whl", hash = "sha256:cb1bc5ed9403daa7da05475d615739cc0212e861b7306f314379d958592aaa89"}, + {file = "ruff-0.7.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:27c1c52a8d199a257ff1e5582d078eab7145129aa02721815ca8fa4f9612dc35"}, + {file = "ruff-0.7.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:588a34e1ef2ea55b4ddfec26bbe76bc866e92523d8c6cdec5e8aceefeff02d99"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94fc32f9cdf72dc75c451e5f072758b118ab8100727168a3df58502b43a599ca"}, + {file = 
"ruff-0.7.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:985818742b833bffa543a84d1cc11b5e6871de1b4e0ac3060a59a2bae3969250"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32f1e8a192e261366c702c5fb2ece9f68d26625f198a25c408861c16dc2dea9c"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:699085bf05819588551b11751eff33e9ca58b1b86a6843e1b082a7de40da1565"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:344cc2b0814047dc8c3a8ff2cd1f3d808bb23c6658db830d25147339d9bf9ea7"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4316bbf69d5a859cc937890c7ac7a6551252b6a01b1d2c97e8fc96e45a7c8b4a"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79d3af9dca4c56043e738a4d6dd1e9444b6d6c10598ac52d146e331eb155a8ad"}, + {file = "ruff-0.7.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c5c121b46abde94a505175524e51891f829414e093cd8326d6e741ecfc0a9112"}, + {file = "ruff-0.7.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8422104078324ea250886954e48f1373a8fe7de59283d747c3a7eca050b4e378"}, + {file = "ruff-0.7.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:56aad830af8a9db644e80098fe4984a948e2b6fc2e73891538f43bbe478461b8"}, + {file = "ruff-0.7.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:658304f02f68d3a83c998ad8bf91f9b4f53e93e5412b8f2388359d55869727fd"}, + {file = "ruff-0.7.1-py3-none-win32.whl", hash = "sha256:b517a2011333eb7ce2d402652ecaa0ac1a30c114fbbd55c6b8ee466a7f600ee9"}, + {file = "ruff-0.7.1-py3-none-win_amd64.whl", hash = "sha256:f38c41fcde1728736b4eb2b18850f6d1e3eedd9678c914dede554a70d5241307"}, + {file = "ruff-0.7.1-py3-none-win_arm64.whl", hash = "sha256:19aa200ec824c0f36d0c9114c8ec0087082021732979a359d6f3c390a6ff2a37"}, + {file = "ruff-0.7.1.tar.gz", hash = "sha256:9d8a41d4aa2dad1575adb98a82870cf5db5f76b2938cf2206c22c940034a36f4"}, ] [[package]] @@ -2235,4 +2238,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.8,<4.0" -content-hash = "8bca8ea1574daf605abc00cf0ba4e2736551b6ef3d16f601c8d34e7e3e602a75" +content-hash = "b60ab2c4d6c82943516dfabb20d290f82fbbd3404812901da61b69f9fde7581a" diff --git a/pyproject.toml b/pyproject.toml index e055205a..15d331da 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -31,7 +31,7 @@ package-mode = false [tool.poetry.dependencies] python = ">=3.8,<4.0" -ops = ">=2.4.1" +ops = ">=2.17.0" kazoo = ">=2.8.0" # The cosl dep could be removed from here once PYDEPS is released: @@ -88,7 +88,7 @@ optional = true pytest = ">=7.2" coverage = { extras = ["toml"], version = ">7.0" } pytest-mock = "^3.11.1" -ops-scenario = "^7.0.0" +ops = { version = ">=2.17.0", extras = ["testing"] } [tool.poetry.group.integration] optional = true diff --git a/requirements.txt b/requirements.txt index f50adf5e..21dfa336 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ certifi==2024.8.30 ; python_version >= "3.8" and python_version < "4.0" cffi==1.17.1 ; python_version >= "3.8" and python_version < "4.0" and platform_python_implementation != "PyPy" charset-normalizer==3.4.0 ; python_version >= "3.8" and python_version < "4.0" cosl==0.0.24 ; python_version >= "3.8" and python_version < "4.0" -cryptography==43.0.1 ; python_version >= "3.8" and python_version < "4.0" +cryptography==43.0.3 ; python_version >= "3.8" and python_version < "4.0" exceptiongroup==1.2.2 ; python_version 
>= "3.8" and python_version < "3.11" h11==0.14.0 ; python_version >= "3.8" and python_version < "4.0" httpcore==1.0.6 ; python_version >= "3.8" and python_version < "4.0" diff --git a/src/charm.py b/src/charm.py index 61d24ccc..2225bdbd 100755 --- a/src/charm.py +++ b/src/charm.py @@ -13,7 +13,6 @@ from charms.operator_libs_linux.v0 import sysctl from charms.rolling_ops.v0.rollingops import RollingOpsManager, RunWithLock from ops import ( - ActiveStatus, CollectStatusEvent, EventBase, StatusBase, @@ -51,6 +50,7 @@ def __init__(self, *args): super().__init__(*args) self.name = CHARM_KEY self.substrate: Substrates = SUBSTRATE + self.pending_inactive_statuses: list[Status] = [] # Common attrs init self.state = ClusterState(self, substrate=self.substrate) @@ -75,6 +75,7 @@ def __init__(self, *args): self.framework.observe(getattr(self.on, "install"), self._on_install) self.framework.observe(getattr(self.on, "remove"), self._on_remove) self.framework.observe(getattr(self.on, "config_changed"), self._on_roles_changed) + self.framework.observe(self.on.collect_unit_status, self._on_collect_status) self.framework.observe(self.on.collect_app_status, self._on_collect_status) # peer-cluster events are shared between all roles, so necessary to init here to avoid instantiating multiple times @@ -112,7 +113,7 @@ def _set_status(self, key: Status) -> None: log_level: DebugLevel = key.value.log_level getattr(logger, log_level.lower())(status.message) - self.unit.status = status + self.pending_inactive_statuses.append(key) def _on_roles_changed(self, _): """Handler for `config_changed` events. @@ -169,22 +170,8 @@ def _disable_enable_restart_broker(self, event: RunWithLock) -> None: return def _on_collect_status(self, event: CollectStatusEvent): - ready_to_start = self.state.ready_to_start.value.status - event.add_status(ready_to_start) - - if not isinstance(ready_to_start, ActiveStatus): - return - - if not self.state.runs_broker: - # early return, the next checks only concern the broker - return - - if not self.broker.workload.active(): - event.add_status(Status.BROKER_NOT_RUNNING.value.status) - - if not self.state.kraft_mode: - if not self.state.zookeeper.broker_active(): - event.add_status(Status.ZK_NOT_CONNECTED.value.status) + for status in self.pending_inactive_statuses + [Status.ACTIVE]: + event.add_status(status.value.status) if __name__ == "__main__": diff --git a/src/core/models.py b/src/core/models.py index 9a8f136a..9aae4e4e 100644 --- a/src/core/models.py +++ b/src/core/models.py @@ -603,7 +603,7 @@ def pod(self) -> Pod: K8s-only. """ - return self.k8s.get_pod(pod_name=self.pod_name) + return self.k8s.get_pod(self.pod_name) @cached_property def node(self) -> Node: @@ -611,7 +611,7 @@ def node(self) -> Node: K8s-only. """ - return self.k8s.get_node(pod=self.pod) + return self.k8s.get_node(self.pod_name) @cached_property def node_ip(self) -> str: @@ -619,7 +619,7 @@ def node_ip(self) -> str: K8s-only. 
""" - return self.k8s.get_node_ip(node=self.node) + return self.k8s.get_node_ip(self.pod_name) class ZooKeeper(RelationState): diff --git a/src/events/balancer.py b/src/events/balancer.py index 1433828b..2974a653 100644 --- a/src/events/balancer.py +++ b/src/events/balancer.py @@ -11,7 +11,6 @@ from ops import ( ActionEvent, - ActiveStatus, EventBase, InstallEvent, Object, @@ -61,6 +60,9 @@ def __init__(self, charm) -> None: config=self.charm.config, ) + # Before fast exit to avoid silently ignoring the action + self.framework.observe(getattr(self.charm.on, "rebalance_action"), self.rebalance) + # Fast exit after workload instantiation, but before any event observer if BALANCER.value not in self.charm.config.roles or not self.charm.unit.is_leader(): return @@ -82,8 +84,6 @@ def __init__(self, charm) -> None: self.framework.observe(self.charm.on.update_status, self._on_config_changed) self.framework.observe(self.charm.on.config_changed, self._on_config_changed) - self.framework.observe(getattr(self.charm.on, "rebalance_action"), self.rebalance) - def _on_install(self, event: InstallEvent) -> None: """Handler for `install` event.""" if not self.workload.container_can_connect: @@ -101,8 +101,9 @@ def _on_install(self, event: InstallEvent) -> None: def _on_start(self, event: StartEvent | PebbleReadyEvent) -> None: """Handler for `start` or `pebble-ready` events.""" - self.charm._set_status(self.charm.state.ready_to_start) - if not isinstance(self.charm.unit.status, ActiveStatus): + current_status = self.charm.state.ready_to_start + if current_status is not Status.ACTIVE: + self.charm._set_status(current_status) event.defer() return @@ -207,33 +208,36 @@ def rebalance(self, event: ActionEvent) -> None: available_brokers = [int(broker.split("/")[1]) for broker in brokers] failure_conditions = [ - (not self.charm.unit.is_leader(), "Action must be ran on the application leader"), ( - not self.balancer_manager.cruise_control.monitoring, + lambda: not self.charm.unit.is_leader(), + "Action must be ran on the application leader", + ), + ( + lambda: not self.balancer_manager.cruise_control.monitoring, "CruiseControl balancer service is not yet ready", ), ( - self.balancer_manager.cruise_control.executing, + lambda: self.balancer_manager.cruise_control.executing, "CruiseControl balancer service is currently executing a task, please try again later", ), ( - not self.balancer_manager.cruise_control.ready, + lambda: not self.balancer_manager.cruise_control.ready, "CruiseControl balancer service has not yet collected enough data to provide a partition reallocation proposal", ), ( - event.params["mode"] in (MODE_ADD, MODE_REMOVE) + lambda: event.params["mode"] in (MODE_ADD, MODE_REMOVE) and event.params.get("brokerid", None) is None, "'add' and 'remove' rebalance action require passing the 'brokerid' parameter", ), ( - event.params["mode"] in (MODE_ADD, MODE_REMOVE) + lambda: event.params["mode"] in (MODE_ADD, MODE_REMOVE) and event.params.get("brokerid") not in available_brokers, "invalid brokerid", ), ] for check, msg in failure_conditions: - if check: + if check(): logging.error(msg) event.set_results({"error": msg}) event.fail(msg) @@ -261,8 +265,6 @@ def rebalance(self, event: ActionEvent) -> None: event.set_results(sanitised_response) - self.charm._set_status(Status.ACTIVE) - @property def healthy(self) -> bool: """Checks and updates various charm lifecycle states. 
@@ -274,8 +276,9 @@ def healthy(self) -> bool: if not self.charm.state.runs_balancer: return True - self.charm._set_status(self.charm.state.ready_to_start) - if not isinstance(self.charm.unit.status, ActiveStatus): + current_status = self.charm.state.ready_to_start + if current_status is not Status.ACTIVE: + self.charm._set_status(current_status) return False if not self.workload.active() and self.charm.unit.is_leader(): diff --git a/src/events/broker.py b/src/events/broker.py index b0274c9a..cd579978 100644 --- a/src/events/broker.py +++ b/src/events/broker.py @@ -11,7 +11,6 @@ from charms.operator_libs_linux.v1.snap import SnapError from ops import ( - ActiveStatus, EventBase, InstallEvent, Object, @@ -172,8 +171,9 @@ def _on_start(self, event: StartEvent | PebbleReadyEvent) -> None: # noqa: C901 self._init_kraft_mode() # FIXME ready to start probably needs to account for credentials being created beforehand - self.charm._set_status(self.charm.state.ready_to_start) - if not isinstance(self.charm.unit.status, ActiveStatus): + current_status = self.charm.state.ready_to_start + if current_status is not Status.ACTIVE: + self.charm._set_status(current_status) event.defer() return @@ -223,7 +223,7 @@ def _on_start(self, event: StartEvent | PebbleReadyEvent) -> None: # noqa: C901 self.charm.on.update_status.emit() # only log once on successful 'on-start' run - if isinstance(self.charm.unit.status, ActiveStatus): + if not self.charm.pending_inactive_statuses: logger.info(f'Broker {self.charm.unit.name.split("/")[1]} connected') def _on_config_changed(self, event: EventBase) -> None: @@ -339,8 +339,6 @@ def _on_update_status(self, _: UpdateStatusEvent) -> None: self.charm._set_status(Status.BROKER_NOT_RUNNING) return - self.charm._set_status(Status.ACTIVE) - def _on_secret_changed(self, event: SecretChangedEvent) -> None: """Handler for `secret_changed` events.""" if not event.secret.label or not self.charm.state.cluster.relation: @@ -408,8 +406,9 @@ def healthy(self) -> bool: Returns: True if service is alive and active. Otherwise False """ - self.charm._set_status(self.charm.state.ready_to_start) - if not isinstance(self.charm.unit.status, ActiveStatus): + current_status = self.charm.state.ready_to_start + if current_status is not Status.ACTIVE: + self.charm._set_status(current_status) return False if not self.workload.active(): diff --git a/src/events/tls.py b/src/events/tls.py index e0d9459f..12253a9f 100644 --- a/src/events/tls.py +++ b/src/events/tls.py @@ -27,7 +27,6 @@ RelationJoinedEvent, ) from ops.framework import Object -from ops.model import ActiveStatus from literals import TLS_RELATION, TRUSTED_CA_RELATION, TRUSTED_CERTIFICATE_RELATION, Status @@ -139,7 +138,6 @@ def _trusted_relation_created(self, event: EventBase) -> None: # Create a "mtls" flag so a new listener (CLIENT_SSL) is created self.charm.state.cluster.update({"mtls": "enabled"}) - self.charm.app.status = ActiveStatus() def _trusted_relation_joined(self, event: RelationJoinedEvent) -> None: """Generate a CSR so the tls-certificates operator works as expected.""" diff --git a/src/events/zookeeper.py b/src/events/zookeeper.py index 80c8db9b..efe92d76 100644 --- a/src/events/zookeeper.py +++ b/src/events/zookeeper.py @@ -117,8 +117,13 @@ def _on_zookeeper_broken(self, _: RelationEvent) -> None: # Kafka keeps a meta.properties in every log.dir with a unique ClusterID # this ID is provided by ZK, and removing it on relation-broken allows # re-joining to another ZK cluster. 
- for storage in self.charm.model.storages["data"]: - self.charm.workload.exec(["rm", f"{storage.location}/meta.properties"]) + self.charm.workload.exec( + [ + "bash", + "-c", + f"""find {self.charm.workload.paths.data_path} -type f -name meta.properties -delete || true""", + ] + ) if not self.charm.unit.is_leader(): return diff --git a/src/managers/k8s.py b/src/managers/k8s.py index 8866e799..40adbfc7 100644 --- a/src/managers/k8s.py +++ b/src/managers/k8s.py @@ -4,8 +4,11 @@ """Manager for handling Kafka Kubernetes resources for a single Kafka pod.""" +import json import logging -from functools import cached_property +import math +import time +from functools import cache from lightkube.core.client import Client from lightkube.core.exceptions import ApiError @@ -15,13 +18,12 @@ from literals import SECURITY_PROTOCOL_PORTS, AuthMap, AuthMechanism -logger = logging.getLogger(__name__) - # default logging from lightkube httpx requests is very noisy -logging.getLogger("lightkube").disabled = True -logging.getLogger("lightkube.core.client").disabled = True -logging.getLogger("httpx").disabled = True -logging.getLogger("httpcore").disabled = True +logging.getLogger("lightkube").setLevel(logging.CRITICAL) +logging.getLogger("httpx").setLevel(logging.CRITICAL) +logging.getLogger("httpcore").setLevel(logging.CRITICAL) + +logger = logging.getLogger(__name__) class K8sManager: @@ -42,7 +44,22 @@ def __init__( "SSL": "ssl", } - @cached_property + def __eq__(self, other: object) -> bool: + """__eq__ dunder. + + Needed to get a cache hit on calls to the same method from different instances of K8sManager + as `self` is passed to methods. + """ + return isinstance(other, K8sManager) and self.__dict__ == other.__dict__ + + def __hash__(self) -> int: + """__hash__ dunder. + + K8sManager needs to be hashable so that `self` can be passed to the 'dict-like' cache. + """ + return hash(json.dumps(self.__dict__, sort_keys=True)) + + @property def client(self) -> Client: """The Lightkube client.""" return Client( # pyright: ignore[reportArgumentType] @@ -50,46 +67,34 @@ namespace=self.namespace, ) + @staticmethod + def get_ttl_hash(seconds=60 * 2) -> int: + """Gets a unique time hash for the cache, expiring after 2 minutes. + + When 2m has passed, a new value will be created, ensuring a cache miss + and a re-loading of that K8s API call.
+ """ + return math.floor(time.time() / seconds) + # --- GETTERS --- def get_pod(self, pod_name: str = "") -> Pod: """Gets the Pod via the K8s API.""" - # Allows us to get pods from other peer units - pod_name = pod_name or self.pod_name - - return self.client.get( - res=Pod, - name=self.pod_name, - ) + return self._get_pod(pod_name, self.get_ttl_hash()) - def get_node(self, pod: Pod) -> Node: + def get_node(self, pod_name: str) -> Node: """Gets the Node the Pod is running on via the K8s API.""" - if not pod.spec or not pod.spec.nodeName: - raise Exception("Could not find podSpec or nodeName") - - return self.client.get( - Node, - name=pod.spec.nodeName, - ) - - def get_node_ip(self, node: Node) -> str: - """Gets the IP Address of the Node via the K8s API.""" - # all these redundant checks are because Lightkube's typing is awful - if not node.status or not node.status.addresses: - raise Exception(f"No status found for {node}") + return self._get_node(pod_name, self.get_ttl_hash()) - for addresses in node.status.addresses: - if addresses.type in ["ExternalIP", "InternalIP", "Hostname"]: - return addresses.address - - return "" + def get_node_ip(self, pod_name: str) -> str: + """Gets the IP Address of the Node of a given Pod via the K8s API.""" + return self._get_node_ip(pod_name, self.get_ttl_hash()) def get_service(self, service_name: str) -> Service | None: """Gets the Service via the K8s API.""" - return self.client.get( - res=Service, - name=service_name, - ) + return self._get_service(service_name, self.get_ttl_hash()) + + # SERVICE BUILDERS def get_node_port( self, @@ -139,7 +144,7 @@ def get_bootstrap_nodeport(self, auth_map: AuthMap) -> int: def build_bootstrap_services(self) -> Service: """Builds a ClusterIP service for initial client connection.""" - pod = self.get_pod(pod_name=self.pod_name) + pod = self.get_pod(self.pod_name) if not pod.metadata: raise Exception(f"Could not find metadata for {pod}") @@ -231,3 +236,46 @@ def apply_service(self, service: Service) -> None: return else: raise + + # PRIVATE METHODS + + @cache + def _get_pod(self, pod_name: str = "", *_) -> Pod: + # Allows us to get pods from other peer units + pod_name = pod_name or self.pod_name + + return self.client.get( + res=Pod, + name=pod_name, + ) + + @cache + def _get_node(self, pod_name: str, *_) -> Node: + pod = self.get_pod(pod_name) + if not pod.spec or not pod.spec.nodeName: + raise Exception("Could not find podSpec or nodeName") + + return self.client.get( + Node, + name=pod.spec.nodeName, + ) + + @cache + def _get_node_ip(self, pod_name: str, *_) -> str: + # all these redundant checks are because Lightkube's typing is awful + node = self.get_node(pod_name) + if not node.status or not node.status.addresses: + raise Exception(f"No status found for {node}") + + for addresses in node.status.addresses: + if addresses.type in ["ExternalIP", "InternalIP", "Hostname"]: + return addresses.address + + return "" + + @cache + def _get_service(self, service_name: str, *_) -> Service | None: + return self.client.get( + res=Service, + name=service_name, + ) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index e3cf56a9..fb500cbf 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -124,3 +124,15 @@ def patched_node_ip(): yield patched_node_ip else: yield + + +@pytest.fixture(autouse=True) +def patched_node_port(): + if SUBSTRATE == "k8s": + with patch( + "managers.k8s.K8sManager.get_listener_nodeport", + return_value=20000, + ) as patched_node_port: + yield patched_node_port + else: + 
yield diff --git a/tests/unit/scenario/test_balancer.py b/tests/unit/scenario/test_balancer.py deleted file mode 100644 index fb6e6b3b..00000000 --- a/tests/unit/scenario/test_balancer.py +++ /dev/null @@ -1,308 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. - -import dataclasses -import json -import logging -import re -from pathlib import Path -from unittest.mock import PropertyMock, patch - -import pytest -import yaml -from ops import ActiveStatus -from scenario import Container, Context, PeerRelation, Relation, State - -from charm import KafkaCharm -from literals import ( - BALANCER_WEBSERVER_USER, - CONTAINER, - INTERNAL_USERS, - PEER, - SUBSTRATE, - ZK, - Status, -) - -pytestmark = pytest.mark.balancer - -logger = logging.getLogger(__name__) - - -CONFIG = yaml.safe_load(Path("./config.yaml").read_text()) -ACTIONS = yaml.safe_load(Path("./actions.yaml").read_text()) -METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) - - -@pytest.fixture() -def charm_configuration(): - """Enable direct mutation on configuration dict.""" - return json.loads(json.dumps(CONFIG)) - - -@pytest.fixture() -def base_state(): - - if SUBSTRATE == "k8s": - state = State(leader=True, containers=[Container(name=CONTAINER, can_connect=True)]) - - else: - state = State(leader=True) - - return state - - -@pytest.mark.skipif(SUBSTRATE == "k8s", reason="snap not used on K8s") -def test_install_blocks_snap_install_failure(charm_configuration, base_state: State): - # Given - charm_configuration["options"]["roles"]["default"] = "balancer" - ctx = Context( - KafkaCharm, - meta=METADATA, - config=charm_configuration, - actions=ACTIONS, - ) - state_in = base_state - - # When - with patch("workload.Workload.install", return_value=False), patch("workload.Workload.write"): - state_out = ctx.run(ctx.on.install(), state_in) - - # Then - assert state_out.unit_status == Status.SNAP_NOT_INSTALLED.value.status - - -@patch("workload.Workload.restart") -@patch("workload.Workload.start") -def test_stop_workload_if_not_leader( - patched_start, patched_restart, charm_configuration, base_state: State -): - # Given - charm_configuration["options"]["roles"]["default"] = "balancer" - ctx = Context( - KafkaCharm, - meta=METADATA, - config=charm_configuration, - actions=ACTIONS, - ) - state_in = dataclasses.replace(base_state, leader=False) - - # When - ctx.run(ctx.on.start(), state_in) - - # Then - assert not patched_start.called - assert not patched_restart.called - - -def test_stop_workload_if_role_not_present(charm_configuration, base_state: State): - # Given - charm_configuration["options"]["roles"]["default"] = "balancer" - ctx = Context( - KafkaCharm, - meta=METADATA, - config=charm_configuration, - actions=ACTIONS, - ) - state_in = dataclasses.replace(base_state, config={"roles": "broker"}) - - # When - with ( - patch("workload.BalancerWorkload.active", return_value=True), - patch("workload.BalancerWorkload.stop") as patched_stopped, - ): - ctx.run(ctx.on.config_changed(), state_in) - - # Then - patched_stopped.assert_called_once() - - -def test_ready_to_start_maintenance_no_peer_relation(charm_configuration, base_state: State): - # Given - charm_configuration["options"]["roles"]["default"] = "balancer" - ctx = Context( - KafkaCharm, - meta=METADATA, - config=charm_configuration, - actions=ACTIONS, - ) - state_in = base_state - - # When - state_out = ctx.run(ctx.on.start(), state_in) - - # Then - assert state_out.unit_status == Status.NO_PEER_RELATION.value.status - 
- -def test_ready_to_start_no_peer_cluster(charm_configuration, base_state): - """Balancer only, need a peer cluster relation.""" - # Given - charm_configuration["options"]["roles"]["default"] = "balancer" - ctx = Context( - KafkaCharm, - meta=METADATA, - config=charm_configuration, - actions=ACTIONS, - ) - cluster_peer = PeerRelation(PEER, PEER) - state_in = dataclasses.replace(base_state, relations=[cluster_peer]) - - # When - state_out = ctx.run(ctx.on.start(), state_in) - - # Then - assert state_out.unit_status == Status.NO_PEER_CLUSTER_RELATION.value.status - - -def test_ready_to_start_no_zk_data(charm_configuration, base_state: State): - # Given - charm_configuration["options"]["roles"]["default"] = "balancer,broker" - ctx = Context( - KafkaCharm, - meta=METADATA, - config=charm_configuration, - actions=ACTIONS, - ) - cluster_peer = PeerRelation(PEER, PEER) - relation = Relation( - interface=ZK, - endpoint=ZK, - remote_app_name=ZK, - ) - state_in = dataclasses.replace(base_state, relations=[cluster_peer, relation]) - - # When - state_out = ctx.run(ctx.on.start(), state_in) - - # Then - assert state_out.unit_status == Status.ZK_NO_DATA.value.status - - -def test_ready_to_start_no_broker_data(charm_configuration, base_state: State, zk_data): - # Given - charm_configuration["options"]["roles"]["default"] = "balancer,broker" - ctx = Context( - KafkaCharm, - meta=METADATA, - config=charm_configuration, - actions=ACTIONS, - ) - cluster_peer = PeerRelation( - PEER, PEER, local_app_data={f"{user}-password": "pwd" for user in INTERNAL_USERS} - ) - relation = Relation(interface=ZK, endpoint=ZK, remote_app_name=ZK, remote_app_data=zk_data) - state_in = dataclasses.replace(base_state, relations=[cluster_peer, relation]) - - # When - state_out = ctx.run(ctx.on.start(), state_in) - - # Then - assert state_out.unit_status == Status.NO_BROKER_DATA.value.status - - -def test_ready_to_start_ok(charm_configuration, base_state: State, zk_data): - # Given - charm_configuration["options"]["roles"]["default"] = "balancer,broker" - ctx = Context( - KafkaCharm, meta=METADATA, config=charm_configuration, actions=ACTIONS, unit_id=0 - ) - restart_peer = PeerRelation("restart", "restart") - cluster_peer = PeerRelation( - PEER, - local_app_data={f"{user}-password": "pwd" for user in INTERNAL_USERS}, - peers_data={ - i: { - "cores": "8", - "storages": json.dumps( - {f"/var/snap/charmed-kafka/common/var/lib/kafka/data/{i}": "10240"} - ), - } - for i in range(1, 3) - }, - local_unit_data={ - "cores": "8", - "storages": json.dumps( - {f"/var/snap/charmed-kafka/common/var/lib/kafka/data/{0}": "10240"} - ), - }, - ) - - relation = Relation(interface=ZK, endpoint=ZK, remote_app_name=ZK) - state_in = dataclasses.replace( - base_state, relations=[cluster_peer, relation, restart_peer], planned_units=3 - ) - capacity_jbod_file_loaded = { - "brokerCapacities": [ - { - "brokerId": str(i), - "capacity": { - "DISK": {f"/var/snap/charmed-kafka/common/var/lib/kafka/data/{i}": "10240"}, - "CPU": {"num.cores": "8"}, - "NW_IN": "50000", - "NW_OUT": "50000", - }, - "doc": "", - } - for i in range(2, -1, -1) - ] - } - - # When - with ( - patch("workload.BalancerWorkload.write") as patched_writer, - patch("workload.BalancerWorkload.read"), - patch("workload.KafkaWorkload.read"), - patch("workload.BalancerWorkload.exec"), - patch("workload.BalancerWorkload.restart"), - patch("workload.KafkaWorkload.start"), - patch("workload.BalancerWorkload.active", return_value=True), - patch("workload.KafkaWorkload.active", return_value=True), - 
patch("core.models.ZooKeeper.broker_active", return_value=True), - patch( - "core.models.ZooKeeper.zookeeper_connected", - new_callable=PropertyMock, - return_value=True, - ), - patch( - "core.models.PeerCluster.broker_connected", - new_callable=PropertyMock, - return_value=True, - ), - patch( - "managers.config.ConfigManager.server_properties", - new_callable=PropertyMock, - return_value=[], - ), - patch( - "managers.config.BalancerConfigManager.cruise_control_properties", - new_callable=PropertyMock, - return_value=[], - ), - patch( - "managers.config.ConfigManager.jaas_config", new_callable=PropertyMock, return_value="" - ), - patch( - "managers.config.BalancerConfigManager.jaas_config", - new_callable=PropertyMock, - return_value="", - ), - patch("health.KafkaHealth.machine_configured", return_value=True), - patch("json.loads", return_value=capacity_jbod_file_loaded), - patch("charms.operator_libs_linux.v1.snap.SnapCache"), - patch( - "core.models.PeerCluster.broker_capacities", - new_callable=PropertyMock, - return_value=capacity_jbod_file_loaded, - ), - ): - state_out = ctx.run(ctx.on.start(), state_in) - - # Then - assert state_out.unit_status == ActiveStatus() - # Credentials written to file - assert re.match( - rf"{BALANCER_WEBSERVER_USER}: \w+,ADMIN", - patched_writer.call_args_list[-1].kwargs["content"], - ) diff --git a/tests/unit/test_auth.py b/tests/unit/test_auth.py index 99903086..33d90b2a 100644 --- a/tests/unit/test_auth.py +++ b/tests/unit/test_auth.py @@ -1,46 +1,19 @@ #!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. +# Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. import logging -from pathlib import Path -from unittest.mock import patch +from unittest.mock import Mock import pytest -import yaml -from ops.testing import Harness -from charm import KafkaCharm -from literals import CHARM_KEY, CONTAINER, SUBSTRATE from managers.auth import Acl, AuthManager +from workload import KafkaWorkload logger = logging.getLogger(__name__) pytestmark = pytest.mark.broker -CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) -ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) -METADATA = str(yaml.safe_load(Path("./metadata.yaml").read_text())) - - -@pytest.fixture -def harness(): - harness = Harness(KafkaCharm, meta=METADATA) - - if SUBSTRATE == "k8s": - harness.set_can_connect(CONTAINER, True) - - harness.add_relation("restart", CHARM_KEY) - harness._update_config( - { - "log_retention_ms": "-1", - "compression_type": "producer", - } - ) - - harness.begin() - return harness - def test_acl(): assert sorted(Acl.__annotations__.keys()) == sorted( @@ -101,56 +74,22 @@ def test_generate_consumer_acls(): assert sorted(resource_types) == sorted({"TOPIC", "GROUP"}) -def test_add_user_adds_zk_tls_flag(harness: Harness[KafkaCharm]): +def test_add_user_adds_zk_tls_flag(monkeypatch) -> None: """Checks zk-tls-config-file flag is called for configs bin command.""" - with patch("workload.KafkaWorkload.run_bin_command") as patched_exec: - harness.charm.broker.auth_manager.add_user("samwise", "gamgee", zk_auth=True) - args = patched_exec.call_args_list[0][1] - - assert ( - f"--zk-tls-config-file={harness.charm.workload.paths.server_properties}" - in args["bin_args"] - ), "--zk-tls-config-file flag not found" - assert "--zookeeper=" in args["bin_args"], "--zookeeper flag not found" - - -def test_prefixed_acls(harness: Harness[KafkaCharm]): - """Checks the requirements for adding and removing PREFIXED ACLs.""" - with 
patch("workload.KafkaWorkload.run_bin_command") as patched_run_bin: - for func in [ - harness.charm.broker.auth_manager.add_acl, - harness.charm.broker.auth_manager.remove_acl, - ]: - func( - username="bilbo", - operation="WRITE", - resource_type="TOPIC", - resource_name="there-and-back-again", - ) - func( - username="bilbo", - operation="WRITE", - resource_type="TOPIC", - resource_name="there-and-back-*", - ) - func(username="bilbo", operation="WRITE", resource_type="TOPIC", resource_name="??*") - - assert ( - "--resource-pattern-type=LITERAL" - in patched_run_bin.call_args_list[0].kwargs["bin_args"] - ) - - assert ( - "--resource-pattern-type=PREFIXED" - in patched_run_bin.call_args_list[1].kwargs["bin_args"] - ) - - # checks that the prefixed topic removes the '*' char from the end - assert ( - "--topic=there-and-back-" in patched_run_bin.call_args_list[1].kwargs["bin_args"] - ) - - assert ( - "--resource-pattern-type=LITERAL" - in patched_run_bin.call_args_list[2].kwargs["bin_args"] - ) + # Given + state = Mock() + state.zookeeper.connect = "host" + workload = KafkaWorkload(container=Mock()) + patched_exec = Mock() + monkeypatch.setattr(workload, "run_bin_command", patched_exec) + auth_manager = AuthManager(state, workload, "", "") + + # When + auth_manager.add_user("samwise", "gamgee", zk_auth=True) + args = patched_exec.call_args_list[0][1] + + # Then + assert ( + f"--zk-tls-config-file={workload.paths.server_properties}" in args["bin_args"] + ), "--zk-tls-config-file flag not found" + assert "--zookeeper=host" in args["bin_args"], "--zookeeper flag not found" diff --git a/tests/unit/test_balancer.py b/tests/unit/test_balancer.py index b12fa65f..48fbcd30 100644 --- a/tests/unit/test_balancer.py +++ b/tests/unit/test_balancer.py @@ -2,23 +2,75 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
+import dataclasses +import json import logging +import re from pathlib import Path -from unittest.mock import MagicMock, PropertyMock, patch +from typing import cast +from unittest.mock import PropertyMock, patch import pytest import yaml -from ops.testing import Harness +from ops import ActiveStatus +from ops.testing import ActionFailed, Container, Context, PeerRelation, Relation, State from charm import KafkaCharm -from literals import BALANCER_TOPICS, CHARM_KEY, CONTAINER, SUBSTRATE +from literals import ( + BALANCER_TOPICS, + BALANCER_WEBSERVER_USER, + CONTAINER, + PEER, + SUBSTRATE, + ZK, + Status, +) from managers.balancer import CruiseControlClient +pytestmark = pytest.mark.balancer + logger = logging.getLogger(__name__) -CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) -ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) -METADATA = str(yaml.safe_load(Path("./metadata.yaml").read_text())) + +CONFIG = yaml.safe_load(Path("./config.yaml").read_text()) +ACTIONS = yaml.safe_load(Path("./actions.yaml").read_text()) +METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) + + +@pytest.fixture() +def charm_configuration(): + """Enable direct mutation on configuration dict.""" + return json.loads(json.dumps(CONFIG)) + + +@pytest.fixture() +def base_state(): + + if SUBSTRATE == "k8s": + state = State(leader=True, containers=[Container(name=CONTAINER, can_connect=True)]) + + else: + state = State(leader=True) + + return state + + +@pytest.fixture() +def ctx_balancer_only(charm_configuration: dict) -> Context: + charm_configuration["options"]["roles"]["default"] = "balancer" + ctx = Context( + KafkaCharm, meta=METADATA, config=charm_configuration, actions=ACTIONS, unit_id=0 + ) + return ctx + + +@pytest.fixture() +def ctx_broker_and_balancer(charm_configuration: dict) -> Context: + charm_configuration["options"]["roles"]["default"] = "broker,balancer" + ctx = Context( + KafkaCharm, meta=METADATA, config=charm_configuration, actions=ACTIONS, unit_id=0 + ) + return ctx class MockResponse: @@ -34,25 +86,213 @@ def __dict__(self): return self.content -@pytest.fixture -def harness(): - harness = Harness(KafkaCharm, meta=METADATA) +@pytest.mark.skipif(SUBSTRATE == "k8s", reason="snap not used on K8s") +def test_install_blocks_snap_install_failure( + ctx_balancer_only: Context, base_state: State +) -> None: + # Given + ctx = ctx_balancer_only + state_in = base_state - if SUBSTRATE == "k8s": - harness.set_can_connect(CONTAINER, True) - - harness.add_relation("restart", CHARM_KEY) - harness._update_config( - { - "log_retention_ms": "-1", - "compression_type": "producer", - "roles": "broker,balancer", - } + # When + with patch("workload.Workload.install", return_value=False), patch("workload.Workload.write"): + state_out = ctx.run(ctx.on.install(), state_in) + + # Then + assert state_out.unit_status == Status.SNAP_NOT_INSTALLED.value.status + + +@patch("workload.Workload.restart") +@patch("workload.Workload.start") +def test_stop_workload_if_not_leader( + patched_start, patched_restart, ctx_balancer_only: Context, base_state: State +) -> None: + # Given + ctx = ctx_balancer_only + state_in = dataclasses.replace(base_state, leader=False) + + # When + ctx.run(ctx.on.start(), state_in) + + # Then + assert not patched_start.called + assert not patched_restart.called + + +def test_stop_workload_if_role_not_present(ctx_balancer_only: Context, base_state: State) -> None: + # Given + ctx = ctx_balancer_only + state_in = dataclasses.replace(base_state, config={"roles": 
"broker"}) + + # When + with ( + patch("workload.BalancerWorkload.active", return_value=True), + patch("workload.BalancerWorkload.stop") as patched_stopped, + ): + ctx.run(ctx.on.config_changed(), state_in) + + # Then + patched_stopped.assert_called_once() + + +def test_ready_to_start_maintenance_no_peer_relation( + ctx_balancer_only: Context, base_state: State +) -> None: + # Given + ctx = ctx_balancer_only + state_in = base_state + + # When + state_out = ctx.run(ctx.on.start(), state_in) + + # Then + assert state_out.unit_status == Status.NO_PEER_RELATION.value.status + + +def test_ready_to_start_no_peer_cluster(ctx_balancer_only: Context, base_state: State) -> None: + """Balancer only, need a peer cluster relation.""" + # Given + ctx = ctx_balancer_only + cluster_peer = PeerRelation(PEER, PEER) + state_in = dataclasses.replace(base_state, relations=[cluster_peer]) + + # When + state_out = ctx.run(ctx.on.start(), state_in) + + # Then + assert state_out.unit_status == Status.NO_PEER_CLUSTER_RELATION.value.status + + +def test_ready_to_start_no_zk_data(ctx_broker_and_balancer: Context, base_state: State) -> None: + # Given + ctx = ctx_broker_and_balancer + cluster_peer = PeerRelation(PEER, PEER) + relation = Relation( + interface=ZK, + endpoint=ZK, + remote_app_name=ZK, + ) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, relation]) + + # When + state_out = ctx.run(ctx.on.start(), state_in) + + # Then + assert state_out.unit_status == Status.ZK_NO_DATA.value.status + + +def test_ready_to_start_no_broker_data( + ctx_broker_and_balancer: Context, + base_state: State, + zk_data: dict[str, str], + passwords_data: dict[str, str], +) -> None: + # Given + ctx = ctx_broker_and_balancer + cluster_peer = PeerRelation(PEER, PEER, local_app_data=passwords_data) + relation = Relation(interface=ZK, endpoint=ZK, remote_app_name=ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, relation]) + + # When + state_out = ctx.run(ctx.on.start(), state_in) + + # Then + assert state_out.unit_status == Status.NO_BROKER_DATA.value.status + + +def test_ready_to_start_ok( + ctx_broker_and_balancer: Context, + base_state: State, + zk_data: dict[str, str], + passwords_data: dict[str, str], +) -> None: + # Given + ctx = ctx_broker_and_balancer + cluster_peer = PeerRelation( + PEER, + local_app_data=passwords_data, + peers_data={ + i: { + "cores": "8", + "storages": json.dumps( + {f"/var/snap/charmed-kafka/common/var/lib/kafka/data/{i}": "10240"} + ), + } + for i in range(1, 3) + }, + local_unit_data={ + "cores": "8", + "storages": json.dumps( + {f"/var/snap/charmed-kafka/common/var/lib/kafka/data/{0}": "10240"} + ), + }, + ) + restart_peer = PeerRelation("restart", "restart") + relation = Relation(interface=ZK, endpoint=ZK, remote_app_name=ZK) + state_in = dataclasses.replace( + base_state, relations=[cluster_peer, restart_peer, relation], planned_units=3 ) - harness.set_leader(True) - harness.begin() - return harness + # When + with ( + patch("workload.BalancerWorkload.write") as patched_writer, + patch("workload.BalancerWorkload.read"), + patch( + "json.loads", + return_value={"brokerCapacities": [{}, {}, {}]}, + ), + patch( + "core.cluster.ClusterState.broker_capacities", + new_callable=PropertyMock, + return_value={"brokerCapacities": [{}, {}, {}]}, + ), + patch("workload.KafkaWorkload.read"), + patch("workload.BalancerWorkload.exec"), + patch("workload.BalancerWorkload.restart"), + patch("workload.KafkaWorkload.start"), + 
patch("workload.BalancerWorkload.active", return_value=True), + patch("workload.KafkaWorkload.active", return_value=True), + patch("core.models.ZooKeeper.broker_active", return_value=True), + patch( + "core.models.ZooKeeper.zookeeper_connected", + new_callable=PropertyMock, + return_value=True, + ), + patch( + "core.models.PeerCluster.broker_connected", + new_callable=PropertyMock, + return_value=True, + ), + patch( + "managers.config.ConfigManager.server_properties", + new_callable=PropertyMock, + return_value=[], + ), + patch( + "managers.config.BalancerConfigManager.cruise_control_properties", + new_callable=PropertyMock, + return_value=[], + ), + patch( + "managers.config.ConfigManager.jaas_config", new_callable=PropertyMock, return_value="" + ), + patch( + "managers.config.BalancerConfigManager.jaas_config", + new_callable=PropertyMock, + return_value="", + ), + patch("health.KafkaHealth.machine_configured", return_value=True), + patch("charms.operator_libs_linux.v1.snap.SnapCache"), # specific VM, works fine on k8s + ): + state_out = ctx.run(ctx.on.start(), state_in) + + # Then + assert state_out.unit_status == ActiveStatus() + # Credentials written to file + assert re.match( + rf"{BALANCER_WEBSERVER_USER}: \w+,ADMIN", + patched_writer.call_args_list[-1].kwargs["content"], + ) def test_client_get_args(client: CruiseControlClient): @@ -112,7 +352,14 @@ def test_client_ready(client: CruiseControlClient, state: dict): assert not client.ready -def test_balancer_manager_create_internal_topics(harness: Harness[KafkaCharm]): +def test_balancer_manager_create_internal_topics( + ctx_broker_and_balancer: Context, base_state: State +) -> None: + # Given + ctx = ctx_broker_and_balancer + state_in = base_state + + # When with ( patch("core.models.PeerCluster.broker_uris", new_callable=PropertyMock, return_value=""), patch( @@ -120,47 +367,52 @@ def test_balancer_manager_create_internal_topics(harness: Harness[KafkaCharm]): new_callable=None, return_value=BALANCER_TOPICS[0], # pretend it exists already ) as patched_run, + ctx(ctx.on.config_changed(), state_in) as manager, ): - harness.charm.balancer.balancer_manager.create_internal_topics() + charm = cast(KafkaCharm, manager.charm) + charm.balancer.balancer_manager.create_internal_topics() - assert ( - len(patched_run.call_args_list) == 5 - ) # checks for existence 3 times, creates 2 times + # Then - list_counter = 0 - for args, _ in patched_run.call_args_list: - all_flags = "".join(args[1]) + assert len(patched_run.call_args_list) == 5 # checks for existence 3 times, creates 2 times - if "list" in all_flags: - list_counter += 1 + list_counter = 0 + for args, _ in patched_run.call_args_list: + all_flags = "".join(args[1]) - # only created needed topics - if "create" in all_flags: - assert any((topic in all_flags) for topic in BALANCER_TOPICS) - assert BALANCER_TOPICS[0] not in all_flags + if "list" in all_flags: + list_counter += 1 - assert list_counter == len(BALANCER_TOPICS) # checked for existence of all balancer topics + # only created needed topics + if "create" in all_flags: + assert any((topic in all_flags) for topic in BALANCER_TOPICS) + assert BALANCER_TOPICS[0] not in all_flags + assert list_counter == len(BALANCER_TOPICS) # checked for existence of all balancer topics -@pytest.mark.parametrize("leader", [True, False]) + +@pytest.mark.parametrize("leader", [False, True]) @pytest.mark.parametrize("monitoring", [True, False]) @pytest.mark.parametrize("executing", [True, False]) @pytest.mark.parametrize("ready", [True, False]) 
@pytest.mark.parametrize("status", [200, 404]) def test_balancer_manager_rebalance_full( - harness: Harness[KafkaCharm], + ctx_broker_and_balancer: Context, + base_state: State, proposal: dict, leader: bool, monitoring: bool, executing: bool, ready: bool, status: int, -): - mock_event = MagicMock() - mock_event.params = {"mode": "full", "dryrun": True} +) -> None: + # Given + ctx = ctx_broker_and_balancer + state_in = dataclasses.replace(base_state, leader=leader) + payload = {"mode": "full", "dryrun": True} + # When with ( - harness.hooks_disabled(), patch( "managers.balancer.CruiseControlClient.monitoring", new_callable=PropertyMock, @@ -186,28 +438,33 @@ def test_balancer_manager_rebalance_full( new_callable=None, ) as patched_wait_for_task, ): - harness.set_leader(leader) - harness.charm.balancer.rebalance(mock_event) if not all([leader, monitoring, executing, ready, status == 200]): - assert mock_event._mock_children.get("fail") # event.fail was called + with pytest.raises(ActionFailed): + ctx.run(ctx.on.action("rebalance", params=payload), state_in) else: + ctx.run(ctx.on.action("rebalance", params=payload), state_in) assert patched_wait_for_task.call_count - assert mock_event._mock_children.get("set_results") # event.set_results was called @pytest.mark.parametrize("mode", ["add", "remove"]) @pytest.mark.parametrize("brokerid", [None, 0]) def test_rebalance_add_remove_broker_id_length( - harness: Harness[KafkaCharm], proposal: dict, mode: str, brokerid: int | None + ctx_broker_and_balancer: Context, + base_state: State, + proposal: dict, + mode: str, + brokerid: int | None, ): - mock_event = MagicMock() + # Given + ctx = ctx_broker_and_balancer + state_in = base_state + payload = {"mode": mode, "dryrun": True} payload = payload | {"brokerid": brokerid} if brokerid is not None else payload - mock_event.params = payload + # When with ( - harness.hooks_disabled(), patch( "managers.balancer.CruiseControlClient.monitoring", new_callable=PropertyMock, @@ -233,26 +490,28 @@ def test_rebalance_add_remove_broker_id_length( new_callable=None, ) as patched_wait_for_task, ): - harness.set_leader(True) - - # When - harness.charm.balancer.rebalance(mock_event) - # Then if brokerid is None: - assert mock_event._mock_children.get("fail") # event.fail was called + with pytest.raises(ActionFailed): + ctx.run(ctx.on.action("rebalance", params=payload), state_in) + else: + ctx.run(ctx.on.action("rebalance", params=payload), state_in) + + # Then assert patched_wait_for_task.call_count - assert mock_event._mock_children.get("set_results") # event.set_results was called -def test_rebalance_broker_id_not_found(harness: Harness[KafkaCharm]): - mock_event = MagicMock() - payload = {"mode": "add", "dryrun": True, "brokerid": 999} - mock_event.params = payload +def test_rebalance_broker_id_not_found( + ctx_broker_and_balancer: Context, base_state: State +) -> None: + # Given + ctx = ctx_broker_and_balancer + state_in = base_state + payload = {"mode": "add", "dryrun": True, "brokerid": 999} # only one unit in the state + # When with ( - harness.hooks_disabled(), patch( "managers.balancer.CruiseControlClient.monitoring", new_callable=PropertyMock, @@ -269,17 +528,17 @@ def test_rebalance_broker_id_not_found(harness: Harness[KafkaCharm]): return_value=True, ), ): - harness.set_leader(True) - # When - harness.charm.balancer.rebalance(mock_event) + with pytest.raises(ActionFailed): + ctx.run(ctx.on.action("rebalance", params=payload), state_in) - # Then - assert mock_event._mock_children.get("fail") # event.fail was 
called - -def test_balancer_manager_clean_results(harness: Harness[KafkaCharm], proposal: dict): - cleaned_results = harness.charm.balancer.balancer_manager.clean_results(value=proposal) +def test_balancer_manager_clean_results( + ctx_broker_and_balancer: Context, base_state: State, proposal: dict +) -> None: + # Given + ctx = ctx_broker_and_balancer + state_in = base_state def _check_cleaned_results(value) -> bool: if isinstance(value, list): @@ -293,4 +552,10 @@ def _check_cleaned_results(value) -> bool: return True + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) + cleaned_results = charm.balancer.balancer_manager.clean_results(value=proposal) + + # Then assert _check_cleaned_results(cleaned_results) diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index d502b176..ce8adbae 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -1,16 +1,17 @@ #!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. +# Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. +import dataclasses import logging import re from pathlib import Path -from unittest.mock import Mock, PropertyMock, patch +from typing import cast +from unittest.mock import PropertyMock, patch import pytest import yaml -from ops.model import BlockedStatus -from ops.testing import Harness +from ops.testing import Container, Context, PeerRelation, Relation, State, Storage from charm import KafkaCharm from literals import ( @@ -31,222 +32,299 @@ from literals import OS_REQUIREMENTS -logger = logging.getLogger(__name__) - pytestmark = pytest.mark.broker -CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) -ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) -METADATA = str(yaml.safe_load(Path("./metadata.yaml").read_text())) +logger = logging.getLogger(__name__) + + +CONFIG = yaml.safe_load(Path("./config.yaml").read_text()) +ACTIONS = yaml.safe_load(Path("./actions.yaml").read_text()) +METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) -@pytest.fixture -def harness() -> Harness: - harness = Harness(KafkaCharm, meta=METADATA, actions=ACTIONS, config=CONFIG) +@pytest.fixture() +def base_state(): if SUBSTRATE == "k8s": - harness.set_can_connect(CONTAINER, True) - - harness.add_relation("restart", CHARM_KEY) - harness._update_config( - { - "log_retention_ms": "-1", - "compression_type": "producer", - "expose-external": "none", - } - ) - harness.begin() - storage_metadata = getattr(harness.charm, "meta").storages["data"] - min_storages = storage_metadata.multiple_range[0] if storage_metadata.multiple_range else 1 - with harness.hooks_disabled(): - harness.add_storage(storage_name="data", count=min_storages, attach=True) - return harness + state = State(leader=True, containers=[Container(name=CONTAINER, can_connect=True)]) + + else: + state = State(leader=True) + + return state + + +@pytest.fixture() +def ctx() -> Context: + ctx = Context(KafkaCharm, meta=METADATA, config=CONFIG, actions=ACTIONS, unit_id=0) + return ctx @pytest.mark.skipif(SUBSTRATE == "k8s", reason="sysctl config not used on K8s") -def test_install_blocks_snap_install_failure(harness: Harness[KafkaCharm]): +def test_install_blocks_snap_install_failure(ctx: Context, base_state: State) -> None: """Checks unit goes to BlockedStatus after snap failure on install hook.""" - with patch("workload.KafkaWorkload.install", return_value=False): - harness.charm.on.install.emit() - assert harness.charm.unit.status == 
Status.SNAP_NOT_INSTALLED.value.status + # Given + state_in = base_state + # When + with patch("workload.Workload.install", return_value=False), patch("workload.Workload.write"): + state_out = ctx.run(ctx.on.install(), state_in) -def test_install_sets_env_vars(harness: Harness[KafkaCharm], patched_etc_environment): + # Then + assert state_out.unit_status == Status.SNAP_NOT_INSTALLED.value.status + + +def test_install_sets_env_vars(ctx: Context, base_state: State, patched_etc_environment) -> None: """Checks KAFKA_OPTS and other vars are written to /etc/environment on install hook.""" - with patch("workload.KafkaWorkload.install"): - harness.charm.on.install.emit() - patched_etc_environment.assert_called_once() + # Given + state_in = base_state + + # When + with patch("workload.Workload.install"): + _ = ctx.run(ctx.on.install(), state_in) + + # Then + patched_etc_environment.assert_called_once() @pytest.mark.skipif(SUBSTRATE == "k8s", reason="sysctl config not used on K8s") -def test_install_configures_os(harness: Harness[KafkaCharm], patched_sysctl_config): - with patch("workload.KafkaWorkload.install"): - harness.charm.on.install.emit() - patched_sysctl_config.assert_called_once_with(OS_REQUIREMENTS) +def test_install_configures_os(ctx: Context, base_state: State, patched_sysctl_config) -> None: + # Given + state_in = base_state + + # When + with patch("workload.Workload.install"): + _ = ctx.run(ctx.on.install(), state_in) + + # Then + patched_sysctl_config.assert_called_once_with(OS_REQUIREMENTS) @pytest.mark.skipif(SUBSTRATE == "k8s", reason="sysctl config not used on K8s") def test_install_sets_status_if_os_config_fails( - harness: Harness[KafkaCharm], patched_sysctl_config -): - with patch("workload.KafkaWorkload.install"): + ctx: Context, base_state: State, patched_sysctl_config +) -> None: + # Given + state_in = base_state + + # When + with patch("workload.Workload.install"): patched_sysctl_config.side_effect = ApplyError("Error setting values") - harness.charm.on.install.emit() + state_out = ctx.run(ctx.on.install(), state_in) - assert harness.charm.unit.status == Status.SYSCONF_NOT_POSSIBLE.value.status + # Then + assert state_out.unit_status == Status.SYSCONF_NOT_POSSIBLE.value.status -def test_ready_to_start_maintenance_no_peer_relation(harness: Harness[KafkaCharm]): - harness.charm.on.start.emit() - assert harness.charm.unit.status == Status.NO_PEER_RELATION.value.status +def test_ready_to_start_maintenance_no_peer_relation(ctx: Context, base_state: State) -> None: + # Given + state_in = base_state + # When + state_out = ctx.run(ctx.on.start(), state_in) -def test_ready_to_start_blocks_no_mode(harness: Harness[KafkaCharm]): - with harness.hooks_disabled(): - harness.add_relation(PEER, CHARM_KEY) + # Then + assert state_out.unit_status == Status.NO_PEER_RELATION.value.status - harness.charm.on.start.emit() - assert harness.charm.unit.status == Status.MISSING_MODE.value.status +def test_ready_to_start_blocks_no_zookeeper_relation(ctx: Context, base_state: State) -> None: + # Given + cluster_peer = PeerRelation(PEER, PEER) + state_in = dataclasses.replace(base_state, relations=[cluster_peer]) -def test_ready_to_start_waits_no_zookeeper_data(harness: Harness[KafkaCharm]): - with harness.hooks_disabled(): - harness.add_relation(PEER, CHARM_KEY) - harness.add_relation(ZK, ZK) + # When + state_out = ctx.run(ctx.on.start(), state_in) - harness.charm.on.start.emit() - assert harness.charm.unit.status == Status.ZK_NO_DATA.value.status + # Then + assert state_out.unit_status == 
Status.MISSING_MODE.value.status -def test_ready_to_start_waits_no_user_credentials(harness: Harness[KafkaCharm], zk_data): - with harness.hooks_disabled(): - harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.update_relation_data(zk_rel_id, ZK, zk_data) +def test_ready_to_start_waits_no_zookeeper_data(ctx: Context, base_state: State) -> None: + # Given + cluster_peer = PeerRelation(PEER, PEER) + zk_relation = Relation(ZK, ZK) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) - harness.charm.on.start.emit() - assert harness.charm.unit.status == Status.NO_BROKER_CREDS.value.status + # When + state_out = ctx.run(ctx.on.start(), state_in) + # Then + assert state_out.unit_status == Status.ZK_NO_DATA.value.status -def test_ready_to_start_blocks_mismatch_tls(harness: Harness[KafkaCharm], zk_data, passwords_data): - with harness.hooks_disabled(): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.update_relation_data(zk_rel_id, ZK, zk_data) - harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) - harness.update_relation_data(peer_rel_id, CHARM_KEY, {"tls": "enabled"}) - harness.charm.on.start.emit() - assert harness.charm.unit.status == Status.ZK_TLS_MISMATCH.value.status +def test_ready_to_start_waits_no_user_credentials( + ctx: Context, base_state: State, zk_data: dict[str, str] +) -> None: + # Given + cluster_peer = PeerRelation(PEER, PEER) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) + # When + state_out = ctx.run(ctx.on.start(), state_in) -def test_ready_to_start_succeeds(harness: Harness[KafkaCharm], zk_data, passwords_data): - with harness.hooks_disabled(): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.update_relation_data(zk_rel_id, ZK, zk_data) - harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) + # Then + assert state_out.unit_status == Status.NO_BROKER_CREDS.value.status - assert harness.charm.state.ready_to_start.value.status == Status.ACTIVE.value.status +def test_ready_to_start_blocks_mismatch_tls( + ctx: Context, base_state: State, zk_data: dict[str, str], passwords_data: dict[str, str] +) -> None: + # Given + cluster_peer = PeerRelation(PEER, PEER, local_app_data=passwords_data | {"tls": "enabled"}) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) -def test_healthy_fails_if_not_ready_to_start( - harness: Harness[KafkaCharm], zk_data, passwords_data -): - with harness.hooks_disabled(): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.update_relation_data(zk_rel_id, ZK, zk_data) - harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) - harness.update_relation_data(peer_rel_id, CHARM_KEY, {"tls": "enabled"}) + # When + state_out = ctx.run(ctx.on.start(), state_in) + + # Then + assert state_out.unit_status == Status.ZK_TLS_MISMATCH.value.status - assert not harness.charm.broker.healthy +def test_ready_to_start_succeeds( + ctx: Context, base_state: State, zk_data: dict[str, str], passwords_data: dict[str, str] +) -> None: + # Given + cluster_peer = PeerRelation(PEER, PEER, local_app_data=passwords_data) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, 
relations=[cluster_peer, zk_relation]) -def test_healthy_fails_if_snap_not_active(harness: Harness[KafkaCharm], zk_data, passwords_data): - with harness.hooks_disabled(): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.update_relation_data(zk_rel_id, ZK, zk_data) - harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) + # When + with ( + patch("workload.KafkaWorkload.active", return_value=True), + patch("workload.KafkaWorkload.write"), + patch("workload.KafkaWorkload.start") as patched_start, + patch("events.broker.BrokerOperator._on_update_status", autospec=True), + ): + ctx.run(ctx.on.start(), state_in) + + # Then + assert patched_start.call_count + + +def test_healthy_fails_if_not_ready_to_start( + ctx: Context, base_state: State, zk_data: dict[str, str], passwords_data: dict[str, str] +) -> None: + # Given + cluster_peer = PeerRelation(PEER, PEER, local_app_data=passwords_data | {"tls": "enabled"}) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) + + # When + with ctx(ctx.on.start(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) + assert not charm.broker.healthy + + +def test_healthy_fails_if_snap_not_active( + ctx: Context, base_state: State, zk_data: dict[str, str], passwords_data: dict[str, str] +) -> None: + # Given + cluster_peer = PeerRelation(PEER, PEER, local_app_data=passwords_data) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) + + # When + with ( + patch("workload.KafkaWorkload.active", return_value=False) as patched_snap_active, + patch("workload.KafkaWorkload.start"), + ctx(ctx.on.start(), state_in) as manager, + ): + charm = cast(KafkaCharm, manager.charm) + assert not charm.broker.healthy + state_out = manager.run() - with patch("workload.KafkaWorkload.active", return_value=False) as patched_snap_active: - assert not harness.charm.broker.healthy - assert patched_snap_active.call_count == 1 - assert harness.charm.unit.status == Status.BROKER_NOT_RUNNING.value.status + # Then + assert patched_snap_active.call_count + assert state_out.unit_status == Status.BROKER_NOT_RUNNING.value.status -def test_healthy_succeeds(harness: Harness[KafkaCharm], zk_data, passwords_data): - with harness.hooks_disabled(): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.update_relation_data(zk_rel_id, ZK, zk_data) - harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) +def test_healthy_succeeds( + ctx: Context, base_state: State, zk_data: dict[str, str], passwords_data: dict[str, str] +): + cluster_peer = PeerRelation(PEER, PEER, local_app_data=passwords_data) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) - with patch("workload.KafkaWorkload.active", return_value=True): - assert harness.charm.broker.healthy + with ( + patch("workload.KafkaWorkload.active", return_value=True), + ctx(ctx.on.collect_app_status(), state_in) as manager, + ): + charm = cast(KafkaCharm, manager.charm) + assert charm.broker.healthy -def test_start_defers_without_zookeeper(harness: Harness[KafkaCharm]): +def test_start_defers_without_zookeeper(ctx: Context, base_state: State) -> None: """Checks event deferred and not lost without ZK relation on start hook.""" - with 
patch("ops.framework.EventBase.defer") as patched_defer: - harness.charm.on.start.emit() + # Given + cluster_peer = PeerRelation(PEER, PEER) + state_in = dataclasses.replace(base_state, relations=[cluster_peer]) + + # When + state_out = ctx.run(ctx.on.start(), state_in) - patched_defer.assert_called_once() + # Then + assert len(state_out.deferred) == 1 + assert state_out.deferred[0].name == "start" -def test_start_sets_necessary_config(harness: Harness[KafkaCharm], zk_data, passwords_data): +def test_start_sets_necessary_config( + ctx: Context, base_state: State, zk_data: dict[str, str], passwords_data: dict[str, str] +) -> None: """Checks event writes all needed config to unit on start hook.""" - with harness.hooks_disabled(): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.set_leader(True) - harness.add_relation_unit(zk_rel_id, "zookeeper/0") - harness.update_relation_data(zk_rel_id, ZK, zk_data) - harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) + # Given + cluster_peer = PeerRelation(PEER, PEER, local_app_data=passwords_data) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) + # When with ( + # NOTE: Patching `active` cuts the hook short, as we are only testing properties being set. + patch("workload.KafkaWorkload.active", return_value=False), patch("managers.auth.AuthManager.add_user"), patch("managers.config.ConfigManager.set_zk_jaas_config") as patched_jaas, patch("managers.config.ConfigManager.set_server_properties") as patched_server_properties, patch("managers.config.ConfigManager.set_client_properties") as patched_client_properties, patch("workload.KafkaWorkload.start"), - # NOTE: Patching `active` cuts the hook short, as we are only testing properties being set. - patch("workload.KafkaWorkload.active", return_value=False), ): - harness.charm.on.start.emit() - patched_jaas.assert_called_once() - patched_server_properties.assert_called_once() - patched_client_properties.assert_called_once() + ctx.run(ctx.on.start(), state_in) + + # Then + patched_jaas.assert_called_once() + patched_server_properties.assert_called_once() + patched_client_properties.assert_called_once() @pytest.mark.skipif(SUBSTRATE == "vm", reason="pebble layer not used on vm") -def test_start_sets_pebble_layer(harness: Harness[KafkaCharm], zk_data, passwords_data): +def test_start_sets_pebble_layer( + ctx: Context, base_state: State, zk_data: dict[str, str], passwords_data: dict[str, str] +) -> None: """Checks layer is the expected at start.""" - with harness.hooks_disabled(): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.set_leader(True) - harness.add_relation_unit(zk_rel_id, "zookeeper/0") - harness.update_relation_data(zk_rel_id, ZK, zk_data) - harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) + # Given + cluster_peer = PeerRelation(PEER, PEER, local_app_data=passwords_data) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) + # When with ( + # NOTE: Patching `active` cuts the hook short, as we are only testing layer being set. 
+ patch("workload.KafkaWorkload.active", return_value=False), patch("managers.auth.AuthManager.add_user"), patch("managers.config.ConfigManager.set_zk_jaas_config"), patch("managers.config.ConfigManager.set_server_properties"), patch("managers.config.ConfigManager.set_client_properties"), - # NOTE: Patching `active` cuts the hook short, as we are only testing layer being set. - patch("workload.KafkaWorkload.active", return_value=False), + patch("workload.KafkaWorkload.start"), + ctx(ctx.on.start(), state_in) as manager, ): - harness.charm.on.start.emit() - found_plan = harness.get_container_pebble_plan("kafka").to_dict() + charm = cast(KafkaCharm, manager.charm) extra_opts = [ - f"-javaagent:{harness.charm.workload.paths.jmx_prometheus_javaagent}={JMX_EXPORTER_PORT}:{harness.charm.workload.paths.jmx_prometheus_config}", - f"-Djava.security.auth.login.config={harness.charm.workload.paths.zk_jaas}", + f"-javaagent:{charm.workload.paths.jmx_prometheus_javaagent}={JMX_EXPORTER_PORT}:{charm.workload.paths.jmx_prometheus_config}", + f"-Djava.security.auth.login.config={charm.workload.paths.zk_jaas}", ] - command = f"{harness.charm.workload.paths.binaries_path}/bin/kafka-server-start.sh {harness.charm.workload.paths.server_properties}" + command = f"{charm.workload.paths.binaries_path}/bin/kafka-server-start.sh {charm.workload.paths.server_properties}" expected_plan = { + "description": "Pebble config layer for kafka", "services": { CONTAINER: { "override": "merge", @@ -258,35 +336,44 @@ def test_start_sets_pebble_layer(harness: Harness[KafkaCharm], zk_data, password "environment": { "KAFKA_OPTS": " ".join(extra_opts), "JAVA_HOME": "/usr/lib/jvm/java-18-openjdk-amd64", - "LOG_DIR": harness.charm.workload.paths.logs_path, + "LOG_DIR": charm.workload.paths.logs_path, }, - } + }, }, + "summary": "kafka layer", } - assert expected_plan == found_plan + found_plan = charm.broker.workload.layer.to_dict() + + # Then + assert expected_plan == found_plan -def test_start_does_not_start_if_not_ready(harness: Harness[KafkaCharm]): +def test_start_does_not_start_if_not_ready(ctx: Context, base_state: State) -> None: """Checks snap service does not start before ready on start hook.""" - with harness.hooks_disabled(): - harness.add_relation(PEER, CHARM_KEY) + # Given + cluster_peer = PeerRelation(PEER, PEER) + state_in = dataclasses.replace(base_state, relations=[cluster_peer]) + # When with ( patch("workload.KafkaWorkload.start") as patched_start_snap_service, patch("ops.framework.EventBase.defer") as patched_defer, ): - harness.charm.on.start.emit() + _ = ctx.run(ctx.on.start(), state_in) - patched_start_snap_service.assert_not_called() - patched_defer.assert_called() + # Then + patched_start_snap_service.assert_not_called() + patched_defer.assert_called() -def test_start_does_not_start_if_not_same_tls_as_zk(harness: Harness[KafkaCharm]): +def test_start_does_not_start_if_not_same_tls_as_zk(ctx: Context, base_state: State): """Checks snap service does not start if mismatch Kafka+ZK TLS on start hook.""" - harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.add_relation_unit(zk_rel_id, "zookeeper/0") + # Given + cluster_peer = PeerRelation(PEER, PEER) + zk_relation = Relation(ZK, ZK) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) + # When with ( patch("managers.auth.AuthManager.add_user"), patch("workload.KafkaWorkload.start") as patched_start_snap_service, @@ -294,57 +381,63 @@ def 
test_start_does_not_start_if_not_same_tls_as_zk(harness: Harness[KafkaCharm] patch("core.models.KafkaCluster.internal_user_credentials", return_value="orthanc"), patch("core.models.KafkaCluster.tls_enabled", return_value=True), ): - harness.charm.on.start.emit() + state_out = ctx.run(ctx.on.start(), state_in) - patched_start_snap_service.assert_not_called() - assert harness.charm.unit.status == Status.ZK_TLS_MISMATCH.value.status + # Then + patched_start_snap_service.assert_not_called() + assert state_out.unit_status == Status.ZK_TLS_MISMATCH.value.status -def test_start_does_not_start_if_leader_has_not_set_creds(harness: Harness[KafkaCharm]): +def test_start_does_not_start_if_leader_has_not_set_creds(ctx: Context, base_state: State) -> None: """Checks snap service does not start without inter-broker creds on start hook.""" - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.add_relation_unit(zk_rel_id, "zookeeper/0") - harness.update_relation_data(peer_rel_id, CHARM_KEY, {"sync-password": "mellon"}) + # Given + cluster_peer = PeerRelation(PEER, PEER, local_app_data={"sync-password": "mellon"}) + zk_relation = Relation(ZK, ZK) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) + # When with ( patch("workload.KafkaWorkload.start") as patched_start_snap_service, patch("core.cluster.ZooKeeper.zookeeper_connected", return_value=True), ): - harness.charm.on.start.emit() + state_out = ctx.run(ctx.on.start(), state_in) - patched_start_snap_service.assert_not_called() - assert harness.charm.unit.status == Status.NO_BROKER_CREDS.value.status + # Then + patched_start_snap_service.assert_not_called() + assert state_out.unit_status == Status.NO_BROKER_CREDS.value.status def test_update_status_blocks_if_broker_not_active( - harness: Harness[KafkaCharm], zk_data, passwords_data + ctx: Context, base_state: State, zk_data: dict[str, str], passwords_data: dict[str, str] ): - with harness.hooks_disabled(): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.update_relation_data(zk_rel_id, ZK, zk_data) - harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) + # Given + cluster_peer = PeerRelation(PEER, PEER, local_app_data=passwords_data) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) + # When with ( patch("workload.KafkaWorkload.active", return_value=True), patch("events.upgrade.KafkaUpgrade.idle", return_value=True), patch("core.cluster.ZooKeeper.broker_active", return_value=False) as patched_broker_active, ): - harness.charm.on.update_status.emit() - assert patched_broker_active.call_count == 1 - assert harness.charm.unit.status == Status.ZK_NOT_CONNECTED.value.status + state_out = ctx.run(ctx.on.update_status(), state_in) + + # Then + assert patched_broker_active.call_count == 1 + assert state_out.unit_status == Status.ZK_NOT_CONNECTED.value.status @pytest.mark.skipif(SUBSTRATE == "k8s", reason="machine health checks not used on K8s") def test_update_status_blocks_if_machine_not_configured( - harness: Harness[KafkaCharm], zk_data, passwords_data -): - with harness.hooks_disabled(): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.update_relation_data(zk_rel_id, ZK, zk_data) - harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) + ctx: Context, base_state: State, zk_data: dict[str, 
str], passwords_data: dict[str, str] +) -> None: + # Given + cluster_peer = PeerRelation(PEER, PEER, local_app_data=passwords_data) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) + + # When with ( patch("health.KafkaHealth.machine_configured", side_effect=SnapError()), @@ -352,105 +445,121 @@ def test_update_status_blocks_if_machine_not_configured( patch("core.cluster.ZooKeeper.broker_active", return_value=True), patch("events.upgrade.KafkaUpgrade.idle", return_value=True), ): - harness.charm.on.update_status.emit() - assert harness.charm.unit.status == Status.BROKER_NOT_RUNNING.value.status + state_out = ctx.run(ctx.on.update_status(), state_in) + # Then + assert state_out.unit_status == Status.BROKER_NOT_RUNNING.value.status -@pytest.mark.skipif(SUBSTRATE == "k8s", reason="sysctl config not used on K8s") -def test_update_status_sets_sysconf_warning(harness: Harness[KafkaCharm], zk_data, passwords_data): - with harness.hooks_disabled(): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.update_relation_data(zk_rel_id, ZK, zk_data) - harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) +@pytest.mark.skipif(SUBSTRATE == "k8s", reason="sysctl config not used on K8s") +def test_update_status_sets_sysconf_warning( + ctx: Context, base_state: State, zk_data: dict[str, str], passwords_data: dict[str, str] +) -> None: + # Given + cluster_peer = PeerRelation(PEER, PEER, local_app_data=passwords_data) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) + + # When with ( patch("workload.KafkaWorkload.active", return_value=True), patch("core.cluster.ZooKeeper.broker_active", return_value=True), patch("health.KafkaHealth.machine_configured", return_value=False), patch("events.upgrade.KafkaUpgrade.idle", return_value=True), ): - harness.charm.on.update_status.emit() - assert harness.charm.unit.status == Status.SYSCONF_NOT_OPTIMAL.value.status + state_out = ctx.run(ctx.on.update_status(), state_in) + # Then + assert state_out.unit_status == Status.SYSCONF_NOT_OPTIMAL.value.status -def test_update_status_sets_active( - harness: Harness[KafkaCharm], zk_data, passwords_data, patched_health_machine_configured -): - with harness.hooks_disabled(): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.update_relation_data(zk_rel_id, ZK, zk_data) - harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) +def test_update_status_sets_active( + ctx: Context, + base_state: State, + zk_data: dict[str, str], + passwords_data: dict[str, str], + patched_health_machine_configured, +) -> None: + # Given + cluster_peer = PeerRelation(PEER, PEER, local_app_data=passwords_data) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) + + # When with ( patch("workload.KafkaWorkload.active", return_value=True), patch("core.cluster.ZooKeeper.broker_active", return_value=True), patch("events.upgrade.KafkaUpgrade.idle", return_value=True), ): - harness.charm.on.update_status.emit() - assert harness.charm.unit.status == Status.ACTIVE.value.status + state_out = ctx.run(ctx.on.update_status(), state_in) + + # Then + assert state_out.unit_status == Status.ACTIVE.value.status @pytest.mark.skipif(SUBSTRATE == "k8s", reason="multiple 
storage not supported in K8s") def test_storage_add_does_nothing_if_snap_not_active( - harness: Harness[KafkaCharm], zk_data, passwords_data -): - with harness.hooks_disabled(): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") - harness.set_leader(True) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.update_relation_data(zk_rel_id, ZK, zk_data) - harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) + ctx: Context, base_state: State, zk_data: dict[str, str], passwords_data: dict[str, str] +) -> None: + # Given + cluster_peer = PeerRelation(PEER, PEER, local_app_data=passwords_data) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + storage = Storage("data") + state_in = dataclasses.replace( + base_state, relations=[cluster_peer, zk_relation], storages=[storage] + ) + # When with ( patch("workload.KafkaWorkload.active", return_value=False), patch("charm.KafkaCharm._disable_enable_restart_broker") as patched_restart, ): - harness.add_storage(storage_name="data", count=2) - harness.attach_storage(storage_id="data/1") + ctx.run(ctx.on.storage_attached(storage), state_in) - assert patched_restart.call_count == 0 + # Then + assert patched_restart.call_count == 0 @pytest.mark.skipif(SUBSTRATE == "k8s", reason="multiple storage not supported in K8s") def test_storage_add_defers_if_service_not_healthy( - harness: Harness[KafkaCharm], zk_data, passwords_data -): - with harness.hooks_disabled(): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") - harness.set_leader(True) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.update_relation_data(zk_rel_id, ZK, zk_data) - harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) + ctx: Context, base_state: State, zk_data: dict[str, str], passwords_data: dict[str, str] +) -> None: + # Given + cluster_peer = PeerRelation(PEER, PEER, local_app_data=passwords_data) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + storage = Storage("data") + state_in = dataclasses.replace( + base_state, relations=[cluster_peer, zk_relation], storages=[storage] + ) + # When with ( patch("workload.KafkaWorkload.active", return_value=True), patch("events.broker.BrokerOperator.healthy", return_value=False), patch("charm.KafkaCharm._disable_enable_restart_broker") as patched_restart, patch("ops.framework.EventBase.defer") as patched_defer, ): - harness.add_storage(storage_name="data", count=2) - harness.attach_storage(storage_id="data/1") + ctx.run(ctx.on.storage_attached(storage), state_in) - assert patched_restart.call_count == 0 - assert patched_defer.call_count == 1 + # Then + assert patched_restart.call_count == 0 + assert patched_defer.call_count == 1 @pytest.mark.skipif(SUBSTRATE == "k8s", reason="multiple storage not supported in K8s") def test_storage_add_disableenables_and_starts( - harness: Harness[KafkaCharm], zk_data, passwords_data -): - with harness.hooks_disabled(): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") - harness.set_leader(True) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.update_relation_data(zk_rel_id, ZK, zk_data) - harness.update_relation_data(peer_rel_id, CHARM_KEY, passwords_data) + ctx: Context, base_state: State, zk_data: dict[str, str], passwords_data: dict[str, str] +) -> None: + # Given + cluster_peer = PeerRelation(PEER, PEER, local_app_data=passwords_data) + restart_peer = 
PeerRelation("restart", "restart") + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + storage = Storage("data") + state_in = dataclasses.replace( + base_state, relations=[cluster_peer, restart_peer, zk_relation], storages=[storage] + ) + # When with ( patch("workload.KafkaWorkload.active", return_value=True), patch("events.broker.BrokerOperator.healthy", return_value=True), @@ -463,24 +572,27 @@ def test_storage_add_disableenables_and_starts( patch("workload.KafkaWorkload.start") as patched_start, patch("ops.framework.EventBase.defer") as patched_defer, ): - harness.add_storage(storage_name="data", count=2) - harness.attach_storage(storage_id="data/1") + ctx.run(ctx.on.storage_attached(storage), state_in) - assert patched_disable_enable.call_count == 1 - assert patched_start.call_count == 1 - assert patched_defer.call_count == 0 + # Then + assert patched_disable_enable.call_count == 1 + assert patched_start.call_count == 1 + assert patched_defer.call_count == 0 def test_zookeeper_changed_sets_passwords_and_creates_users_with_zk( - harness: Harness[KafkaCharm], zk_data -): + ctx: Context, base_state: State, zk_data: dict[str, str] +) -> None: """Checks inter-broker passwords are created on zookeeper-changed hook using zk auth.""" - with harness.hooks_disabled(): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") - harness.set_leader(True) - zk_rel_id = harness.add_relation(ZK, ZK) + # Given + cluster_peer = PeerRelation(PEER, PEER) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace( + base_state, + relations=[cluster_peer, zk_relation], + ) + # When with ( patch("workload.KafkaWorkload.active", return_value=True), patch("managers.auth.AuthManager.add_user") as patched_add_user, @@ -488,58 +600,82 @@ def test_zookeeper_changed_sets_passwords_and_creates_users_with_zk( patch( "managers.config.ConfigManager.set_server_properties" ) as patched_set_server_properties, + ctx(ctx.on.relation_changed(zk_relation), state_in) as manager, ): - harness.update_relation_data(zk_rel_id, ZK, zk_data) - + charm = cast(KafkaCharm, manager.charm) + manager.run() for user in INTERNAL_USERS: - assert harness.charm.state.cluster.relation_data.get(f"{user}-password", None) + assert charm.state.cluster.relation_data.get(f"{user}-password", None) - patched_set_zk_jaas.assert_called() - patched_set_server_properties.assert_called() + # Then + patched_set_zk_jaas.assert_called() + patched_set_server_properties.assert_called() - # checks all users are INTERNAL only - for call in patched_add_user.kwargs.get("username", []): - assert call in INTERNAL_USERS + # checks all users are INTERNAL only + for call in patched_add_user.kwargs.get("username", []): + assert call in INTERNAL_USERS - # checks all users added are added with --zookeeper auth - for call in patched_add_user.kwargs.get("zk_auth", False): - assert True + # checks all users added are added with --zookeeper auth + for call in patched_add_user.kwargs.get("zk_auth", False): + assert True -def test_zookeeper_joined_sets_chroot(harness: Harness[KafkaCharm]): - """Checks chroot is added to ZK relation data on ZKrelationjoined hook.""" - harness.add_relation(PEER, CHARM_KEY) - harness.set_leader(True) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.add_relation_unit(zk_rel_id, f"{ZK}/0") +def test_zookeeper_created_sets_chroot(ctx: Context, base_state: State) -> None: + """Checks chroot is added to ZK relation data on ZKrelationcreated hook.""" + # 
Given + cluster_peer = PeerRelation(PEER, PEER) + zk_relation = Relation(ZK, ZK) + state_in = dataclasses.replace( + base_state, + relations=[cluster_peer, zk_relation], + ) - rel = harness.charm.model.relations[ZK][0].data[harness.charm.app] - assert CHARM_KEY in rel.get("database", rel.get("chroot", "")) + # When + state_out = ctx.run(ctx.on.relation_created(zk_relation), state_in) + # Then + assert (local_databag := state_out.get_relation(zk_relation.id).local_app_data) + assert CHARM_KEY in local_databag.get("database", "") -def test_zookeeper_broken_stops_service_and_removes_meta_properties(harness: Harness[KafkaCharm]): + +def test_zookeeper_broken_stops_service_and_removes_meta_properties( + ctx: Context, base_state: State +) -> None: """Checks chroot is added to ZK relation data on ZKrelationjoined hook.""" - harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) + # Given + cluster_peer = PeerRelation(PEER, PEER) + zk_relation = Relation(ZK, ZK) + state_in = dataclasses.replace( + base_state, + relations=[cluster_peer, zk_relation], + ) + # When with ( patch("workload.KafkaWorkload.stop") as patched_stop_snap_service, patch("workload.KafkaWorkload.exec") as patched_exec, ): - harness.remove_relation(zk_rel_id) + state_out = ctx.run(ctx.on.relation_broken(zk_relation), state_in) - patched_stop_snap_service.assert_called_once() - assert re.match(r"rm .*/meta.properties", " ".join(patched_exec.call_args_list[1].args[0])) - assert isinstance(harness.charm.unit.status, BlockedStatus) + # Then + patched_stop_snap_service.assert_called_once() + assert re.findall(r"meta.properties -delete", " ".join(patched_exec.call_args_list[1].args[0])) + assert state_out.unit_status == Status.ZK_NOT_RELATED.value.status -def test_zookeeper_broken_cleans_internal_user_credentials(harness: Harness[KafkaCharm]): +def test_zookeeper_broken_cleans_internal_user_credentials( + ctx: Context, base_state: State +) -> None: """Checks chroot is added to ZK relation data on ZKrelationjoined hook.""" - with harness.hooks_disabled(): - harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.set_leader(True) + # Given + cluster_peer = PeerRelation(PEER, PEER) + zk_relation = Relation(ZK, ZK) + state_in = dataclasses.replace( + base_state, + relations=[cluster_peer, zk_relation], + ) + # When with ( patch("workload.KafkaWorkload.stop"), patch("workload.KafkaWorkload.exec"), @@ -550,20 +686,26 @@ def test_zookeeper_broken_cleans_internal_user_credentials(harness: Harness[Kafk return_value={"saruman": "orthanc"}, ), ): - harness.remove_relation(zk_rel_id) + ctx.run(ctx.on.relation_broken(zk_relation), state_in) - patched_update.assert_called_once_with({"saruman-password": ""}) + # Then + patched_update.assert_called_once_with({"saruman-password": ""}) -def test_config_changed_updates_server_properties(harness: Harness[KafkaCharm], zk_data): +def test_config_changed_updates_server_properties( + ctx: Context, base_state: State, zk_data: dict[str, str] +) -> None: """Checks that new charm/unit config writes server config to unit on config changed hook.""" - with harness.hooks_disabled(): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.add_relation_unit(zk_rel_id, f"{ZK}/0") - harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") - harness.update_relation_data(zk_rel_id, ZK, zk_data) + # Given + cluster_peer = PeerRelation(PEER, PEER) + restart_peer = PeerRelation("restart", "rolling_op") + zk_relation 
= Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace( + base_state, + relations=[cluster_peer, restart_peer, zk_relation], + ) + # When with ( patch( "managers.config.ConfigManager.server_properties", @@ -575,17 +717,24 @@ def test_config_changed_updates_server_properties(harness: Harness[KafkaCharm], patch("workload.KafkaWorkload.read", return_value=["gandalf=grey"]), patch("managers.config.ConfigManager.set_server_properties") as set_server_properties, patch("managers.config.ConfigManager.set_client_properties"), + patch( + "charms.rolling_ops.v0.rollingops.RollingOpsManager._on_run_with_lock", autospec=True + ), ): - harness.charm.on.config_changed.emit() + ctx.run(ctx.on.config_changed(), state_in) - set_server_properties.assert_called_once() + # Then + set_server_properties.assert_called_once() -def test_config_changed_updates_client_properties(harness: Harness[KafkaCharm]): +def test_config_changed_updates_client_properties(ctx: Context, base_state: State) -> None: """Checks that new charm/unit config writes client config to unit on config changed hook.""" - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") + # Given + cluster_peer = PeerRelation(PEER, PEER) + restart_peer = PeerRelation("restart", "rolling_op") + state_in = dataclasses.replace(base_state, relations=[cluster_peer, restart_peer]) + # When with ( patch( "managers.config.ConfigManager.client_properties", @@ -602,18 +751,25 @@ def test_config_changed_updates_client_properties(harness: Harness[KafkaCharm]): patch("workload.KafkaWorkload.read", return_value=["gandalf=grey"]), patch("managers.config.ConfigManager.set_server_properties"), patch("managers.config.ConfigManager.set_client_properties") as set_client_properties, + patch( + "charms.rolling_ops.v0.rollingops.RollingOpsManager._on_run_with_lock", autospec=True + ), ): - harness.charm.on.config_changed.emit() + ctx.run(ctx.on.config_changed(), state_in) - set_client_properties.assert_called_once() + # Then + set_client_properties.assert_called_once() -def test_config_changed_updates_client_data(harness: Harness[KafkaCharm]): +def test_config_changed_updates_client_data(ctx: Context, base_state: State) -> None: """Checks that provided relation data updates on config changed hook.""" - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") - harness.add_relation(REL_NAME, "app") + # Given + cluster_peer = PeerRelation(PEER, PEER) + restart_peer = PeerRelation("restart", "rolling_op") + client = Relation(REL_NAME, REL_NAME) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, restart_peer, client]) + # When with ( patch( "managers.config.ConfigManager.server_properties", @@ -628,21 +784,24 @@ def test_config_changed_updates_client_data(harness: Harness[KafkaCharm]): patch( "managers.config.ConfigManager.set_client_properties" ) as patched_set_client_properties, + patch( + "charms.rolling_ops.v0.rollingops.RollingOpsManager._on_run_with_lock", autospec=True + ), ): - harness.set_leader(True) - harness.charm.on.config_changed.emit() + ctx.run(ctx.on.config_changed(), state_in) - patched_set_client_properties.assert_called_once() - patched_update_client_data.assert_called_once() + # Then + patched_set_client_properties.assert_called_once() + patched_update_client_data.assert_called_once() -def test_config_changed_restarts(harness: Harness[KafkaCharm]): +def test_config_changed_restarts(ctx: Context, base_state: State) 
-> None: """Checks units rolling-restat on config changed hook.""" - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") - harness.set_leader(True) - zk_rel_id = harness.add_relation(ZK, ZK) - harness.add_relation_unit(zk_rel_id, f"{ZK}/0") + # Given + cluster_peer = PeerRelation(PEER, PEER) + restart_peer = PeerRelation("restart", "rolling_op") + zk_relation = Relation(ZK, ZK, remote_app_data={"username": "glorfindel"}) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, restart_peer, zk_relation]) with ( patch( @@ -653,44 +812,45 @@ def test_config_changed_restarts(harness: Harness[KafkaCharm]): patch("events.broker.BrokerOperator.healthy", return_value=True), patch("workload.KafkaWorkload.read", return_value=["gandalf=white"]), patch("events.upgrade.KafkaUpgrade.idle", return_value=True), - patch("workload.KafkaWorkload.restart") as patched_restart_snap_service, patch("core.cluster.ZooKeeper.broker_active", return_value=True), patch("core.cluster.ZooKeeper.zookeeper_connected", return_value=True), patch("managers.auth.AuthManager.add_user"), patch("managers.config.ConfigManager.set_zk_jaas_config"), patch("managers.config.ConfigManager.set_server_properties"), + patch( + "charms.rolling_ops.v0.rollingops.RollingOpsManager._on_run_with_lock", autospec=True + ) as patched_restart_lib, ): - harness.update_relation_data(zk_rel_id, ZK, {"username": "glorfindel"}) - patched_restart_snap_service.reset_mock() - harness.charm.on.config_changed.emit() - patched_restart_snap_service.assert_called_once() + ctx.run(ctx.on.config_changed(), state_in) + patched_restart_lib.assert_called_once() + pass @pytest.mark.skipif(SUBSTRATE == "k8s", reason="sysctl config not used on K8s") -def test_on_remove_sysctl_is_deleted(harness: Harness[KafkaCharm]): - peer_rel_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0") +def test_on_remove_sysctl_is_deleted(ctx: Context, base_state: State): + # Given + cluster_peer = PeerRelation(PEER, PEER) + state_in = dataclasses.replace(base_state, relations=[cluster_peer]) + # When with patch("charm.sysctl.Config.remove") as patched_sysctl_remove: - harness.charm.on.remove.emit() + ctx.run(ctx.on.remove(), state_in) - patched_sysctl_remove.assert_called_once() + # Then + patched_sysctl_remove.assert_called_once() -def test_workload_version_is_setted(harness: Harness[KafkaCharm], monkeypatch): - output_install = "3.6.0-ubuntu0" - output_changed = "3.6.1-ubuntu0" - monkeypatch.setattr( - harness.charm.broker.workload, - "run_bin_command", - Mock(side_effect=[output_install, output_changed]), - ) - monkeypatch.setattr(harness.charm.broker.workload, "install", Mock(return_value=True)) - - harness.charm.on.install.emit() - assert harness.get_workload_version() == "3.6.0" +def test_workload_version_is_setted(ctx: Context, base_state: State): + # Given + output_bin_install = "3.6.0-ubuntu0" + output_bin_changed = "3.6.1-ubuntu0" + expected_version_installed = "3.6.0" + expected_version_changed = "3.6.1" + restart_peer = PeerRelation("restart", "rolling_op") + state_in = dataclasses.replace(base_state, relations=[restart_peer]) + # When with ( patch( "managers.config.ConfigManager.server_properties", @@ -699,9 +859,19 @@ def test_workload_version_is_setted(harness: Harness[KafkaCharm], monkeypatch): ), patch("events.broker.BrokerOperator.healthy", return_value=True), patch("workload.KafkaWorkload.read", return_value=["gandalf=white"]), + 
patch("workload.KafkaWorkload.install", return_value=True), + patch( + "workload.KafkaWorkload.run_bin_command", + side_effect=[output_bin_install, output_bin_changed], + ), patch("events.upgrade.KafkaUpgrade.idle", return_value=True), + patch( + "charms.rolling_ops.v0.rollingops.RollingOpsManager._on_run_with_lock", autospec=True + ), ): + state_intermediary = ctx.run(ctx.on.install(), state_in) + state_out = ctx.run(ctx.on.config_changed(), state_intermediary) - harness.charm.on.config_changed.emit() - - assert harness.get_workload_version() == "3.6.1" + # Then + assert ctx.workload_version_history == [expected_version_installed] + assert state_out.workload_version == expected_version_changed diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index f9ef283f..ce2ed358 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -1,21 +1,22 @@ #!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. +# Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. +import dataclasses +import json import logging -import os from pathlib import Path +from typing import cast from unittest.mock import PropertyMock, mock_open, patch import pytest import yaml -from ops.testing import Harness -from pydantic import ValidationError +from ops import CharmMeta +from ops.testing import Container, Context, PeerRelation, Relation, State, Storage from charm import KafkaCharm from literals import ( ADMIN_USER, - CHARM_KEY, CONTAINER, DEPENDENCIES, INTER_BROKER_USER, @@ -27,184 +28,193 @@ OAUTH_REL_NAME, PEER, PEER_CLUSTER_ORCHESTRATOR_RELATION, + REL_NAME, SUBSTRATE, ZK, ) -from managers.config import ConfigManager pytestmark = pytest.mark.broker + logger = logging.getLogger(__name__) -BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..")) -CONFIG = str(yaml.safe_load(Path(BASE_DIR + "/config.yaml").read_text())) -ACTIONS = str(yaml.safe_load(Path(BASE_DIR + "/actions.yaml").read_text())) -METADATA = str(yaml.safe_load(Path(BASE_DIR + "/metadata.yaml").read_text())) +CONFIG = yaml.safe_load(Path("./config.yaml").read_text()) +ACTIONS = yaml.safe_load(Path("./actions.yaml").read_text()) +METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) + + +@pytest.fixture() +def charm_configuration(): + """Enable direct mutation on configuration dict.""" + return json.loads(json.dumps(CONFIG)) -@pytest.fixture -def harness(): - harness = Harness(KafkaCharm, meta=METADATA, actions=ACTIONS, config=CONFIG) +@pytest.fixture() +def base_state(): if SUBSTRATE == "k8s": - harness.set_can_connect(CONTAINER, True) - - harness.add_relation("restart", CHARM_KEY) - harness._update_config( - { - "log_retention_ms": "-1", - "compression_type": "producer", - "expose-external": "none", - } - ) - harness.begin() - return harness + state = State(leader=True, containers=[Container(name=CONTAINER, can_connect=True)]) + + else: + state = State(leader=True) + + return state -def test_all_storages_in_log_dirs(harness: Harness[KafkaCharm]): +@pytest.fixture() +def ctx() -> Context: + ctx = Context(KafkaCharm, meta=METADATA, config=CONFIG, actions=ACTIONS, unit_id=0) + return ctx + + +def test_all_storages_in_log_dirs(ctx: Context, base_state: State) -> None: """Checks that the log.dirs property updates with all available storages.""" - storage_metadata = harness.charm.meta.storages["data"] - min_storages = storage_metadata.multiple_range[0] if storage_metadata.multiple_range else 1 - with harness.hooks_disabled(): - 
harness.add_storage(storage_name="data", count=min_storages, attach=True)
+    # Given
+    storage_metadata = CharmMeta(METADATA).storages["data"]
+    min_storages = storage_metadata.multiple_range[0] if storage_metadata.multiple_range else 1
+    storages = [Storage("data") for _ in range(min_storages)]
+    state_in = dataclasses.replace(base_state, storages=storages)
-    assert len(harness.charm.state.log_dirs.split(",")) == len(
-        harness.charm.model.storages["data"]
-    )
+    # When
+    with ctx(ctx.on.storage_attached(storages[0]), state_in) as manager:
+        charm = cast(KafkaCharm, manager.charm)
+
+        # Then
+        assert len(charm.state.log_dirs.split(",")) == len(charm.model.storages["data"])
-def test_internal_credentials_only_return_when_all_present(harness: Harness[KafkaCharm]):
-    peer_rel_id = harness.add_relation(PEER, CHARM_KEY)
-    harness.update_relation_data(
-        peer_rel_id, CHARM_KEY, {f"{INTERNAL_USERS[0]}-password": "mellon"}
+
+def test_internal_credentials_only_return_when_all_present(
+    ctx: Context, base_state: State, passwords_data: dict[str, str]
+) -> None:
+    # Given
+    cluster_peer_incomplete = PeerRelation(
+        PEER, PEER, local_app_data={f"{INTERNAL_USERS[0]}": "mellon"}
     )
+    state_incomplete = dataclasses.replace(base_state, relations=[cluster_peer_incomplete])
+    cluster_peer_complete = PeerRelation(PEER, PEER, local_app_data=passwords_data)
+    state_complete = dataclasses.replace(base_state, relations=[cluster_peer_complete])
-    assert not harness.charm.state.cluster.internal_user_credentials
+    # When
+    with ctx(ctx.on.start(), state_incomplete) as manager:
+        charm = cast(KafkaCharm, manager.charm)
-    for user in INTERNAL_USERS:
-        harness.update_relation_data(peer_rel_id, CHARM_KEY, {f"{user}-password": "mellon"})
+        # Then
+        assert not charm.state.cluster.internal_user_credentials
-    assert harness.charm.state.cluster.internal_user_credentials
-    assert len(harness.charm.state.cluster.internal_user_credentials) == len(INTERNAL_USERS)
+    # When
+    with ctx(ctx.on.start(), state_complete) as manager:
+        charm = cast(KafkaCharm, manager.charm)
+
+        # Then
+        assert charm.state.cluster.internal_user_credentials
+        assert len(charm.state.cluster.internal_user_credentials) == len(INTERNAL_USERS)
-def test_log_dirs_in_server_properties(harness: Harness[KafkaCharm]):
-    """Checks that log.dirs are added to server_properties."""
-    zk_relation_id = harness.add_relation(ZK, CHARM_KEY)
-    harness.update_relation_data(
-        zk_relation_id,
-        harness.charm.app.name,
-        {
-            "database": "/kafka",
-            "chroot": "/kafka",
-            "username": "moria",
-            "password": "mellon",
-            "endpoints": "1.1.1.1,2.2.2.2",
-            "uris": "1.1.1.1:2181/kafka,2.2.2.2:2181/kafka",
-            "tls": "disabled",
-        },
-    )
-    peer_relation_id = harness.add_relation(PEER, CHARM_KEY)
-    harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1")
-    harness.update_relation_data(
-        peer_relation_id, f"{CHARM_KEY}/0", {"private-address": "treebeard"}
-    )
+def test_log_dirs_in_server_properties(ctx: Context, base_state: State) -> None:
+    """Checks that log.dirs are added to server_properties."""
+    # Given
     found_log_dirs = False
-    with patch(
-        "core.models.KafkaCluster.internal_user_credentials",
-        new_callable=PropertyMock,
-        return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"},
-    ):
-        for prop in harness.charm.broker.config_manager.server_properties:
+    state_in = base_state
+
+    # When
+    with (ctx(ctx.on.config_changed(), state_in) as manager,):
+        charm = cast(KafkaCharm, manager.charm)
+        for prop in charm.broker.config_manager.server_properties:
             if "log.dirs" in prop:
found_log_dirs = True - assert found_log_dirs + # Then + assert found_log_dirs -def test_listeners_in_server_properties(harness: Harness[KafkaCharm]): - """Checks that listeners are split into INTERNAL and EXTERNAL.""" - zk_relation_id = harness.add_relation(ZK, CHARM_KEY) - harness.update_relation_data( - zk_relation_id, - harness.charm.app.name, - { - "database": "/kafka", - "chroot": "/kafka", - "username": "moria", - "password": "mellon", - "endpoints": "1.1.1.1,2.2.2.2", - "uris": "1.1.1.1:2181/kafka,2.2.2.2:2181/kafka", - "tls": "disabled", - }, +def test_listeners_in_server_properties( + charm_configuration: dict, base_state: State, zk_data: dict[str, str] +) -> None: + """Checks that listeners are split into INTERNAL, CLIENT and EXTERNAL.""" + # Given + charm_configuration["options"]["expose-external"]["default"] = "nodeport" + cluster_peer = PeerRelation(PEER, PEER, local_unit_data={"private-address": "treebeard"}) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + client_relation = Relation(REL_NAME, "app") + state_in = dataclasses.replace( + base_state, relations=[cluster_peer, zk_relation, client_relation] ) - peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") - harness.update_relation_data( - peer_relation_id, f"{CHARM_KEY}/0", {"private-address": "treebeard"} + ctx = Context( + KafkaCharm, meta=METADATA, config=charm_configuration, actions=ACTIONS, unit_id=0 ) host = "treebeard" if SUBSTRATE == "vm" else "kafka-k8s-0.kafka-k8s-endpoints" sasl_pm = "SASL_PLAINTEXT_SCRAM_SHA_512" - expected_listeners = f"listeners=INTERNAL_{sasl_pm}://0.0.0.0:19092" - expected_advertised_listeners = f"advertised.listeners=INTERNAL_{sasl_pm}://{host}:19092" - - with patch( - "core.models.KafkaCluster.internal_user_credentials", - new_callable=PropertyMock, - return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, - ): - assert expected_listeners in harness.charm.broker.config_manager.server_properties - assert ( - expected_advertised_listeners in harness.charm.broker.config_manager.server_properties - ) + expected_listeners = [ + f"INTERNAL_{sasl_pm}://0.0.0.0:19092", + f"CLIENT_{sasl_pm}://0.0.0.0:9092", + ] + expected_advertised_listeners = [ + f"INTERNAL_{sasl_pm}://{host}:19092", + f"CLIENT_{sasl_pm}://{host}:9092", + ] + if SUBSTRATE == "k8s": + expected_listeners += [f"EXTERNAL_{sasl_pm}://0.0.0.0:29092"] + expected_advertised_listeners += [ + f"EXTERNAL_{sasl_pm}://1234:20000" # values for nodeip:nodeport in conftest + ] -def test_extra_listeners_in_server_properties(harness: Harness[KafkaCharm]): + # When + with ( + patch( + "core.models.KafkaCluster.internal_user_credentials", + new_callable=PropertyMock, + return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, + ), + patch( + "managers.k8s.K8sManager._get_service", + ), + patch( + "managers.k8s.K8sManager.get_node_port", + ), + ctx(ctx.on.config_changed(), state_in) as manager, + ): + charm = cast(KafkaCharm, manager.charm) + + listeners = [ + prop + for prop in charm.broker.config_manager.server_properties + if prop.startswith("listeners=") + ][0] + advertised_listeners = [ + prop + for prop in charm.broker.config_manager.server_properties + if prop.startswith("advertised.listeners=") + ][0] + + # Then + for listener in expected_listeners: + assert listener in listeners + + for listener in expected_advertised_listeners: + assert listener in advertised_listeners + + +def test_extra_listeners_in_server_properties( + charm_configuration: 
dict, base_state: State, zk_data: dict[str, str] +): """Checks that the extra-listeners are properly set from config.""" - # verifying structured config validators - for value in [ - "missing.port", - "low.port:15000", - "high.port:60000", - "non.unique:30000,other.non.unique:30000", - "close.port:30000,other.close.port:30001", - ]: - with pytest.raises(ValidationError): - harness._update_config({"extra_listeners": value}) - harness.charm.broker.config_manager.config = harness.charm.config - - harness._update_config( - {"extra_listeners": "worker-{unit}.foo.com:30000,worker-{unit}.bar.com:40000"} + # Given + charm_configuration["options"]["extra_listeners"][ + "default" + ] = "worker-{unit}.foo.com:30000,worker-{unit}.bar.com:40000" + cluster_peer = PeerRelation(PEER, PEER, local_unit_data={"private-address": "treebeard"}) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + client_relation = Relation( + REL_NAME, "app", remote_app_data={"extra-user-roles": "admin,producer"} ) - harness.charm.broker.config_manager.config = harness.charm.config - - peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") - harness.update_relation_data( - peer_relation_id, f"{CHARM_KEY}/0", {"private-address": "treebeard"} + state_in = dataclasses.replace( + base_state, relations=[cluster_peer, zk_relation, client_relation] + ) + ctx = Context( + KafkaCharm, meta=METADATA, config=charm_configuration, actions=ACTIONS, unit_id=0 ) - - # adding client - client_relation_id = harness.add_relation("kafka-client", "app") - harness.update_relation_data(client_relation_id, "app", {"extra-user-roles": "admin,producer"}) - assert ( - len(harness.charm.broker.config_manager.all_listeners) == 4 - ) # 2 extra, 1 internal, 1 client - - # adding SSL - harness.update_relation_data(peer_relation_id, CHARM_KEY, {"tls": "enabled"}) - assert ( - len(harness.charm.broker.config_manager.all_listeners) == 4 - ) # 2 extra, 1 internal, 1 client - - # adding SSL - harness.update_relation_data(peer_relation_id, CHARM_KEY, {"mtls": "enabled"}) - assert ( - len(harness.charm.broker.config_manager.all_listeners) == 7 - ) # 2 extra sasl_ssl, 2 extra ssl, 1 internal, 2 client - expected_listener_names = { "INTERNAL_SASL_PLAINTEXT_SCRAM_SHA_512", "CLIENT_SASL_PLAINTEXT_SCRAM_SHA_512", @@ -215,48 +225,76 @@ def test_extra_listeners_in_server_properties(harness: Harness[KafkaCharm]): "EXTRA_SSL_SSL_1", } - advertised_listeners_prop = "" - for prop in harness.charm.broker.config_manager.server_properties: - if "advertised.listener" in prop: - advertised_listeners_prop = prop + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) - # validating every expected listener is present - for name in expected_listener_names: - assert name in advertised_listeners_prop + # Then + # 2 extra, 1 internal, 1 client + assert len(charm.broker.config_manager.all_listeners) == 4 - # validating their allocated ports are expected - ports = [] - for listener in advertised_listeners_prop.split("=")[1].split(","): - name, _, port = listener.split(":") + # Adding SSL + cluster_peer = dataclasses.replace(cluster_peer, local_app_data={"tls": "enabled"}) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, client_relation]) - if name.endswith("_0") or name.endswith("_1"): - # verifying allocation uses the baseport - digit = 10**4 - assert int(port) // digit * digit in (30000, 40000) + # When + with ctx(ctx.on.config_changed(), 
state_in) as manager: + charm = cast(KafkaCharm, manager.charm) - # verifying allocation is in steps of 100 - digit = 10**2 - assert int(port) // digit * digit in (39000, 39100, 49000, 49100) + # Then + # 2 extra, 1 internal, 1 client + assert len(charm.broker.config_manager.all_listeners) == 4 - # verifying all ports are unique - assert port not in ports - ports.append(port) + # Adding mTLS + cluster_peer = dataclasses.replace( + cluster_peer, local_app_data={"tls": "enabled", "mtls": "enabled"} + ) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, client_relation]) + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) -def test_oauth_client_listeners_in_server_properties(harness: Harness[KafkaCharm]): - """Checks that oauth client listeners are properly set when a relating through oauth.""" - harness.add_relation(ZK, CHARM_KEY) - peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") - harness.update_relation_data( - peer_relation_id, f"{CHARM_KEY}/0", {"private-address": "treebeard"} - ) + # Then + # 2 extra sasl_ssl, 2 extra ssl, 1 internal, 2 client + assert len(charm.broker.config_manager.all_listeners) == 7 + + advertised_listeners_prop = "" + for prop in charm.broker.config_manager.server_properties: + if "advertised.listener" in prop: + advertised_listeners_prop = prop + + # validating every expected listener is present + for name in expected_listener_names: + assert name in advertised_listeners_prop + + # validating their allocated ports are expected + ports = [] + for listener in advertised_listeners_prop.split("=")[1].split(","): + name, _, port = listener.split(":") - oauth_relation_id = harness.add_relation(OAUTH_REL_NAME, "hydra") - harness.update_relation_data( - oauth_relation_id, + if name.endswith("_0") or name.endswith("_1"): + # verifying allocation uses the baseport + digit = 10**4 + assert int(port) // digit * digit in (30000, 40000) + + # verifying allocation is in steps of 100 + digit = 10**2 + assert int(port) // digit * digit in (39000, 39100, 49000, 49100) + + # verifying all ports are unique + assert port not in ports + ports.append(port) + + +def test_oauth_client_listeners_in_server_properties(ctx: Context, base_state: State) -> None: + """Checks that oauth client listeners are properly set when relating through oauth.""" + # Given + cluster_peer = PeerRelation(PEER, PEER, local_unit_data={"private-address": "treebeard"}) + oauth_relation = Relation( + OAUTH_REL_NAME, "hydra", - { + remote_app_data={ "issuer_url": "issuer", "jwks_endpoint": "jwks", "authorization_endpoint": "authz", @@ -267,10 +305,12 @@ def test_oauth_client_listeners_in_server_properties(harness: Harness[KafkaCharm "jwt_access_token": "False", }, ) - - # let's add a scram client just for fun - client_relation_id = harness.add_relation("kafka-client", "app") - harness.update_relation_data(client_relation_id, "app", {"extra-user-roles": "admin,producer"}) + client_relation = Relation( + REL_NAME, "app", remote_app_data={"extra-user-roles": "admin,producer"} + ) + state_in = dataclasses.replace( + base_state, relations=[cluster_peer, oauth_relation, client_relation] + ) host = "treebeard" if SUBSTRATE == "vm" else "kafka-k8s-0.kafka-k8s-endpoints" internal_protocol, internal_port = "INTERNAL_SASL_PLAINTEXT_SCRAM_SHA_512", "19092" @@ -287,44 +327,37 @@ def test_oauth_client_listeners_in_server_properties(harness: Harness[KafkaCharm
f"{scram_client_protocol}://{host}:{scram_client_port}," f"{oauth_client_protocol}://{host}:{oauth_client_port}" ) - assert expected_listeners in harness.charm.broker.config_manager.server_properties - assert expected_advertised_listeners in harness.charm.broker.config_manager.server_properties + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) -def test_ssl_listeners_in_server_properties(harness: Harness[KafkaCharm]): + # Then + assert expected_listeners in charm.broker.config_manager.server_properties + assert expected_advertised_listeners in charm.broker.config_manager.server_properties + + +def test_ssl_listeners_in_server_properties( + ctx: Context, base_state: State, zk_data: dict[str, str] +) -> None: """Checks that listeners are added after TLS relation are created.""" - zk_relation_id = harness.add_relation(ZK, CHARM_KEY) + # Given + cluster_peer = PeerRelation( + PEER, + PEER, + local_unit_data={"private-address": "treebeard", "certificate": "keepitsecret"}, + local_app_data={"tls": "enabled", "mtls": "enabled"}, + ) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data | {"tls": "enabled"}) # Simulate data-integrator relation - client_relation_id = harness.add_relation("kafka-client", "app") - harness.update_relation_data(client_relation_id, "app", {"extra-user-roles": "admin,producer"}) - client_relation_id = harness.add_relation("kafka-client", "appii") - harness.update_relation_data( - client_relation_id, "appii", {"extra-user-roles": "admin,consumer"} + client_relation = Relation( + REL_NAME, "app", remote_app_data={"extra-user-roles": "admin,producer"} ) - - harness.update_relation_data( - zk_relation_id, - harness.charm.app.name, - { - "database": "/kafka", - "chroot": "/kafka", - "username": "moria", - "password": "mellon", - "endpoints": "1.1.1.1,2.2.2.2", - "uris": "1.1.1.1:2181/kafka,2.2.2.2:2181/kafka", - "tls": "enabled", - }, - ) - peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") - harness.update_relation_data( - peer_relation_id, - f"{CHARM_KEY}/0", - {"private-address": "treebeard", "certificate": "keepitsecret"}, + client_ii_relation = Relation( + REL_NAME, "appii", remote_app_data={"extra-user-roles": "admin,consumer"} ) - - harness.update_relation_data( - peer_relation_id, CHARM_KEY, {"tls": "enabled", "mtls": "enabled"} + state_in = dataclasses.replace( + base_state, relations=[cluster_peer, zk_relation, client_relation, client_ii_relation] ) host = "treebeard" if SUBSTRATE == "vm" else "kafka-k8s-0.kafka-k8s-endpoints" @@ -333,24 +366,29 @@ def test_ssl_listeners_in_server_properties(harness: Harness[KafkaCharm]): expected_listeners = f"listeners=INTERNAL_{sasl_pm}://0.0.0.0:19093,CLIENT_{sasl_pm}://0.0.0.0:9093,CLIENT_{ssl_pm}://0.0.0.0:9094" expected_advertised_listeners = f"advertised.listeners=INTERNAL_{sasl_pm}://{host}:19093,CLIENT_{sasl_pm}://{host}:9093,CLIENT_{ssl_pm}://{host}:9094" - with patch( - "core.models.KafkaCluster.internal_user_credentials", - new_callable=PropertyMock, - return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, + # When + with ( + patch( + "core.models.KafkaCluster.internal_user_credentials", + new_callable=PropertyMock, + return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, + ), + ctx(ctx.on.config_changed(), state_in) as manager, ): - assert expected_listeners in harness.charm.broker.config_manager.server_properties - assert ( - expected_advertised_listeners in 
harness.charm.broker.config_manager.server_properties - ) + charm = cast(KafkaCharm, manager.charm) + + # Then + assert expected_listeners in charm.broker.config_manager.server_properties + assert expected_advertised_listeners in charm.broker.config_manager.server_properties -def test_zookeeper_config_succeeds_fails_config(harness: Harness[KafkaCharm]): +def test_zookeeper_config_succeeds_fails_config(ctx: Context, base_state: State) -> None: """Checks that no ZK config is returned if missing field.""" - zk_relation_id = harness.add_relation(ZK, CHARM_KEY) - harness.update_relation_data( - zk_relation_id, - harness.charm.app.name, - { + # Given + zk_relation = Relation( + ZK, + ZK, + remote_app_data={ "database": "/kafka", "chroot": "/kafka", "username": "moria", @@ -359,16 +397,23 @@ def test_zookeeper_config_succeeds_fails_config(harness: Harness[KafkaCharm]): "tls": "disabled", }, ) - assert not harness.charm.state.zookeeper.zookeeper_connected + state_in = dataclasses.replace(base_state, relations=[zk_relation]) + + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) + # Then + assert not charm.state.zookeeper.zookeeper_connected -def test_zookeeper_config_succeeds_valid_config(harness: Harness[KafkaCharm]): + +def test_zookeeper_config_succeeds_valid_config(ctx: Context, base_state: State) -> None: """Checks that ZK config is returned if all fields.""" - zk_relation_id = harness.add_relation(ZK, CHARM_KEY) - harness.update_relation_data( - zk_relation_id, - harness.charm.app.name, - { + # Given + zk_relation = Relation( + ZK, + ZK, + remote_app_data={ "database": "/kafka", "chroot": "/kafka", "username": "moria", @@ -378,272 +423,360 @@ def test_zookeeper_config_succeeds_valid_config(harness: Harness[KafkaCharm]): "tls": "disabled", }, ) - assert harness.charm.state.zookeeper.connect == "1.1.1.1:2181,2.2.2.2:2181/kafka" - assert harness.charm.state.zookeeper.zookeeper_connected + state_in = dataclasses.replace(base_state, relations=[zk_relation]) + + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) + # Then + assert charm.state.zookeeper.zookeeper_connected + assert charm.state.zookeeper.connect == "1.1.1.1:2181,2.2.2.2:2181/kafka" -def test_kafka_opts(harness: Harness[KafkaCharm]): + +def test_kafka_opts(ctx: Context, base_state: State) -> None: """Checks necessary args for KAFKA_OPTS.""" - args = harness.charm.broker.config_manager.kafka_opts - assert "-Djava.security.auth.login.config" in args - assert "KAFKA_OPTS" in args + # Given + state_in = base_state + + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) + + # Then + args = charm.broker.config_manager.kafka_opts + assert "-Djava.security.auth.login.config" in args + assert "KAFKA_OPTS" in args @pytest.mark.parametrize( "profile,expected", [("production", JVM_MEM_MAX_GB), ("testing", JVM_MEM_MIN_GB)], ) -def test_heap_opts(harness: Harness[KafkaCharm], profile, expected): +def test_heap_opts( + charm_configuration: dict, base_state: State, profile: str, expected: int +) -> None: """Checks necessary args for KAFKA_HEAP_OPTS.""" - # Harness doesn't reinitialize KafkaCharm when calling update_config, which means that - # self.config is not passed again to ConfigManager - harness.update_config({"profile": profile}) - conf_manager = ConfigManager( - harness.charm.state, harness.charm.workload, harness.charm.config, "1" + # Given + 
charm_configuration["options"]["profile"]["default"] = profile + ctx = Context( + KafkaCharm, meta=METADATA, config=charm_configuration, actions=ACTIONS, unit_id=0 ) - args = conf_manager.heap_opts + state_in = base_state + + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) + + args = charm.broker.config_manager.heap_opts + # Then assert f"Xms{expected}G" in args assert f"Xmx{expected}G" in args assert "KAFKA_HEAP_OPTS" in args -def test_kafka_jmx_opts(harness: Harness[KafkaCharm]): +def test_kafka_jmx_opts(ctx: Context, base_state: State) -> None: """Checks necessary args for KAFKA_JMX_OPTS.""" - args = harness.charm.broker.config_manager.kafka_jmx_opts + # Given + state_in = base_state + + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) + args = charm.broker.config_manager.kafka_jmx_opts + + # Then assert "-javaagent:" in args assert args.split(":")[1].split("=")[-1] == str(JMX_EXPORTER_PORT) assert "KAFKA_JMX_OPTS" in args -def test_cc_jmx_opts(harness: Harness[KafkaCharm]): +def test_cc_jmx_opts(ctx: Context, base_state: State) -> None: """Checks necessary args for CC_JMX_OPTS.""" - args = harness.charm.broker.config_manager.cc_jmx_opts + # Given + state_in = base_state + + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) + args = charm.broker.config_manager.cc_jmx_opts + + # Then assert "-javaagent:" in args assert args.split(":")[1].split("=")[-1] == str(JMX_CC_PORT) assert "CC_JMX_OPTS" in args -def test_set_environment(harness: Harness[KafkaCharm]): +def test_set_environment(ctx: Context, base_state: State) -> None: """Checks all necessary env-vars are written to /etc/environment.""" + # Given + state_in = base_state + + # When with ( patch("workload.KafkaWorkload.write") as patched_write, patch("builtins.open", mock_open()), patch("shutil.chown"), + ctx(ctx.on.config_changed(), state_in) as manager, ): - harness.charm.broker.config_manager.set_environment() + charm = cast(KafkaCharm, manager.charm) + charm.broker.config_manager.set_environment() - for call in patched_write.call_args_list: - assert "KAFKA_OPTS" in call.kwargs.get("content", "") - assert "KAFKA_LOG4J_OPTS" in call.kwargs.get("content", "") - assert "KAFKA_JMX_OPTS" in call.kwargs.get("content", "") - assert "KAFKA_HEAP_OPTS" in call.kwargs.get("content", "") - assert "KAFKA_JVM_PERFORMANCE_OPTS" in call.kwargs.get("content", "") - assert "/etc/environment" == call.kwargs.get("path", "") + # Then + for call in patched_write.call_args_list: + assert "KAFKA_OPTS" in call.kwargs.get("content", "") + assert "KAFKA_LOG4J_OPTS" in call.kwargs.get("content", "") + assert "KAFKA_JMX_OPTS" in call.kwargs.get("content", "") + assert "KAFKA_HEAP_OPTS" in call.kwargs.get("content", "") + assert "KAFKA_JVM_PERFORMANCE_OPTS" in call.kwargs.get("content", "") + assert "/etc/environment" == call.kwargs.get("path", "") -def test_bootstrap_server(harness: Harness[KafkaCharm]): +def test_bootstrap_server(ctx: Context, base_state: State) -> None: """Checks the bootstrap-server property setting.""" - peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") - harness.update_relation_data( - peer_relation_id, f"{CHARM_KEY}/0", {"private-address": "treebeard"} + # Given + cluster_peer = PeerRelation( + PEER, + PEER, + local_unit_data={"private-address": "treebeard"}, + peers_data={1: {"private-address": 
"shelob"}}, ) - harness.update_relation_data(peer_relation_id, f"{CHARM_KEY}/1", {"private-address": "shelob"}) + state_in = dataclasses.replace(base_state, relations=[cluster_peer]) - assert len(harness.charm.state.bootstrap_server.split(",")) == 2 - for server in harness.charm.state.bootstrap_server.split(","): - assert "9092" in server + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) + # Then + assert len(charm.state.bootstrap_server.split(",")) == 2 + for server in charm.state.bootstrap_server.split(","): + assert "9092" in server -def test_default_replication_properties_less_than_three(harness: Harness[KafkaCharm]): + +def test_default_replication_properties_less_than_three(ctx: Context, base_state: State) -> None: """Checks replication property defaults updates with units < 3.""" - assert "num.partitions=1" in harness.charm.broker.config_manager.default_replication_properties - assert ( - "default.replication.factor=1" - in harness.charm.broker.config_manager.default_replication_properties - ) - assert ( - "min.insync.replicas=1" - in harness.charm.broker.config_manager.default_replication_properties - ) + # Given + state_in = base_state + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) -def test_default_replication_properties_more_than_three(harness: Harness[KafkaCharm]): + # Then + assert "num.partitions=1" in charm.broker.config_manager.default_replication_properties + assert ( + "default.replication.factor=1" + in charm.broker.config_manager.default_replication_properties + ) + assert ( + "min.insync.replicas=1" in charm.broker.config_manager.default_replication_properties + ) + + +def test_default_replication_properties_more_than_three(ctx: Context, base_state: State) -> None: """Checks replication property defaults updates with units > 3.""" - peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") - harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/2") - harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/3") - harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/4") - harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/5") - - assert "num.partitions=3" in harness.charm.broker.config_manager.default_replication_properties - assert ( - "default.replication.factor=3" - in harness.charm.broker.config_manager.default_replication_properties - ) - assert ( - "min.insync.replicas=2" - in harness.charm.broker.config_manager.default_replication_properties - ) + # Given + cluster_peer = PeerRelation(PEER, PEER, peers_data={i: {} for i in range(1, 6)}) + state_in = dataclasses.replace(base_state, relations=[cluster_peer], planned_units=6) + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) -def test_ssl_principal_mapping_rules(harness: Harness[KafkaCharm]): - """Check that a change in ssl_principal_mapping_rules is reflected in server_properties.""" - harness.add_relation(PEER, CHARM_KEY) - zk_relation_id = harness.add_relation(ZK, CHARM_KEY) - harness.update_relation_data( - zk_relation_id, - harness.charm.app.name, - { - "database": "/kafka", - "chroot": "/kafka", - "username": "moria", - "password": "mellon", - "endpoints": "1.1.1.1,2.2.2.2", - "uris": "1.1.1.1:2181/kafka,2.2.2.2:2181/kafka", - "tls": "disabled", - }, - ) + # Then + assert "num.partitions=3" in charm.broker.config_manager.default_replication_properties + 
assert ( + "default.replication.factor=3" + in charm.broker.config_manager.default_replication_properties + ) + assert ( + "min.insync.replicas=2" in charm.broker.config_manager.default_replication_properties + ) -def test_ssl_principal_mapping_rules(harness: Harness[KafkaCharm]): - """Check that a change in ssl_principal_mapping_rules is reflected in server_properties.""" - harness.add_relation(PEER, CHARM_KEY) - zk_relation_id = harness.add_relation(ZK, CHARM_KEY) - harness.update_relation_data( - zk_relation_id, - harness.charm.app.name, - { - "database": "/kafka", - "chroot": "/kafka", - "username": "moria", - "password": "mellon", - "endpoints": "1.1.1.1,2.2.2.2", - "uris": "1.1.1.1:2181/kafka,2.2.2.2:2181/kafka", - "tls": "disabled", - }, - ) + +def test_ssl_principal_mapping_rules( + charm_configuration: dict, base_state: State, zk_data: dict[str, str] +) -> None: + """Check that a change in ssl_principal_mapping_rules is reflected in server_properties.""" + # Given + charm_configuration["options"]["ssl_principal_mapping_rules"][ + "default" + ] = "RULE:^(erebor)$/$1,DEFAULT" + cluster_peer = PeerRelation(PEER, PEER) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) + ctx = Context( + KafkaCharm, meta=METADATA, config=charm_configuration, actions=ACTIONS, unit_id=0 + ) + + # When + with ( + patch( + "core.models.KafkaCluster.internal_user_credentials", + new_callable=PropertyMock, + return_value={INTER_BROKER_USER: "fangorn", ADMIN_USER: "forest"}, + ), + ctx(ctx.on.config_changed(), state_in) as manager, ): - # Harness doesn't reinitialize KafkaCharm when calling update_config, which means that - # self.config is not passed again to ConfigManager - harness._update_config({"ssl_principal_mapping_rules": "RULE:^(erebor)$/$1,DEFAULT"}) - conf_manager = ConfigManager( - harness.charm.state, harness.charm.workload, harness.charm.config, "1" - ) + charm = cast(KafkaCharm, manager.charm) + # Then assert ( "ssl.principal.mapping.rules=RULE:^(erebor)$/$1,DEFAULT" - in conf_manager.server_properties + in charm.broker.config_manager.server_properties ) -def test_auth_properties(harness: Harness[KafkaCharm]): +def test_auth_properties(ctx: Context, base_state: State, zk_data: dict[str, str]) -> None: """Checks necessary auth properties are present.""" - zk_relation_id = harness.add_relation(ZK, CHARM_KEY) - peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.update_relation_data( - peer_relation_id, harness.charm.app.name, {"sync_password": "mellon"} - ) - harness.update_relation_data( - zk_relation_id, - harness.charm.app.name, - { - "database": "/kafka", - "chroot": "/kafka", - "username": "moria", - "password": "mellon", - "endpoints": "1.1.1.1,2.2.2.2", - "uris": "1.1.1.1:2181/kafka,2.2.2.2:2181/kafka", - "tls": "disabled", - }, - ) + # Given + cluster_peer = PeerRelation(PEER, PEER) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) - assert "broker.id=0" in harness.charm.broker.config_manager.auth_properties - assert ( - f"zookeeper.connect={harness.charm.state.zookeeper.connect}" - in harness.charm.broker.config_manager.auth_properties - ) + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) + + # Then + assert "broker.id=0" in charm.broker.config_manager.auth_properties + assert ( + f"zookeeper.connect={charm.state.zookeeper.connect}" + in charm.broker.config_manager.auth_properties + ) -def test_rack_properties(harness: Harness[KafkaCharm]): +def test_rack_properties(ctx: Context, base_state: State, zk_data: dict[str, str]) -> None: """Checks that rack properties are added to server properties.""" - harness.add_relation(PEER, CHARM_KEY) - zk_relation_id = harness.add_relation(ZK, CHARM_KEY) -
harness.update_relation_data( - zk_relation_id, - harness.charm.app.name, - { - "database": "/kafka", - "chroot": "/kafka", - "username": "moria", - "password": "mellon", - "endpoints": "1.1.1.1,2.2.2.2", - "uris": "1.1.1.1:2181/kafka,2.2.2.2:2181/kafka", - "tls": "disabled", - }, - ) + # Given + cluster_peer = PeerRelation(PEER, PEER) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) - with patch( - "managers.config.ConfigManager.rack_properties", - new_callable=PropertyMock, - return_value=["broker.rack=gondor-west"], + # When + with ( + patch( + "managers.config.ConfigManager.rack_properties", + new_callable=PropertyMock, + return_value=["broker.rack=gondor-west"], + ), + ctx(ctx.on.config_changed(), state_in) as manager, ): - assert "broker.rack=gondor-west" in harness.charm.broker.config_manager.server_properties + charm = cast(KafkaCharm, manager.charm) + + # Then + assert "broker.rack=gondor-west" in charm.broker.config_manager.server_properties -def test_inter_broker_protocol_version(harness: Harness[KafkaCharm]): +def test_inter_broker_protocol_version(ctx: Context, base_state: State, zk_data) -> None: """Checks that rack properties are added to server properties.""" - harness.add_relation(PEER, CHARM_KEY) - zk_relation_id = harness.add_relation(ZK, CHARM_KEY) - harness.update_relation_data( - zk_relation_id, - harness.charm.app.name, - { - "database": "/kafka", - "chroot": "/kafka", - "username": "moria", - "password": "mellon", - "endpoints": "1.1.1.1,2.2.2.2", - "uris": "1.1.1.1:2181/kafka,2.2.2.2:2181/kafka", - "tls": "disabled", - }, - ) - assert len(DEPENDENCIES["kafka_service"]["version"].split(".")) == 3 + # Given + cluster_peer = PeerRelation(PEER, PEER) + zk_relation = Relation(ZK, ZK, remote_app_data=zk_data) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, zk_relation]) - assert ( - "inter.broker.protocol.version=3.6" - in harness.charm.broker.config_manager.server_properties - ) + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) + + # Then + assert "inter.broker.protocol.version=3.6" in charm.broker.config_manager.server_properties + assert len(DEPENDENCIES["kafka_service"]["version"].split(".")) == 3 -def test_super_users(harness: Harness[KafkaCharm]): +def test_super_users(ctx: Context, base_state: State) -> None: """Checks super-users property is updated for new admin clients.""" - peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - app_relation_id = harness.add_relation("kafka-client", "app") - harness.update_relation_data(app_relation_id, "app", {"extra-user-roles": "admin,producer"}) - appii_relation_id = harness.add_relation("kafka-client", "appii") - harness.update_relation_data( - appii_relation_id, "appii", {"extra-user-roles": "admin,consumer"} + # Given + cluster_peer = PeerRelation(PEER, PEER) + client_relation = Relation( + REL_NAME, "app", remote_app_data={"extra-user-roles": "admin,producer"} ) + client_ii_relation = Relation( + REL_NAME, "appii", remote_app_data={"extra-user-roles": "admin,consumer"} + ) + state_in = dataclasses.replace( + base_state, relations=[cluster_peer, client_relation, client_ii_relation] + ) + + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) - assert len(harness.charm.state.super_users.split(";")) == len(INTERNAL_USERS) + # Then + assert len(charm.state.super_users.split(";")) == 
len(INTERNAL_USERS) - harness.update_relation_data( - peer_relation_id, harness.charm.app.name, {f"relation-{app_relation_id}": "mellon"} + cluster_peer = dataclasses.replace( + cluster_peer, local_app_data={f"relation-{client_relation.id}": "mellon"} ) + state_in = dataclasses.replace( + base_state, relations=[cluster_peer, client_relation, client_ii_relation] + ) + + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) - assert len(harness.charm.state.super_users.split(";")) == (len(INTERNAL_USERS) + 1) + # Then + assert len(charm.state.super_users.split(";")) == len(INTERNAL_USERS) + 1 - harness.update_relation_data( - peer_relation_id, harness.charm.app.name, {f"relation-{appii_relation_id}": "mellon"} + cluster_peer = dataclasses.replace( + cluster_peer, + local_app_data={ + f"relation-{client_relation.id}": "mellon", + f"relation-{client_ii_relation.id}": "mellon", + }, + ) + state_in = dataclasses.replace( + base_state, relations=[cluster_peer, client_relation, client_ii_relation] ) - assert len(harness.charm.state.super_users.split(";")) == (len(INTERNAL_USERS) + 2) + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) - harness.update_relation_data(appii_relation_id, "appii", {"extra-user-roles": "consumer"}) + # Then + assert len(charm.state.super_users.split(";")) == len(INTERNAL_USERS) + 2 - assert len(harness.charm.state.super_users.split(";")) == (len(INTERNAL_USERS) + 1) + client_ii_relation = dataclasses.replace( + client_ii_relation, remote_app_data={"extra-user-roles": "consumer"} + ) + state_in = dataclasses.replace( + base_state, relations=[cluster_peer, client_relation, client_ii_relation] + ) + + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) + + # Then + assert len(charm.state.super_users.split(";")) == len(INTERNAL_USERS) + 1 -def test_cruise_control_reporter_only_with_balancer(harness: Harness[KafkaCharm]): +def test_cruise_control_reporter_only_with_balancer(ctx: Context, base_state: State): + # Given + state_in = base_state reporters_config_value = "metric.reporters=com.linkedin.kafka.cruisecontrol.metricsreporter.CruiseControlMetricsReporter" - # Default roles value does not include balancer - assert reporters_config_value not in harness.charm.broker.config_manager.server_properties - with harness.hooks_disabled(): - peer_cluster_relation_id = harness.add_relation( - PEER_CLUSTER_ORCHESTRATOR_RELATION, CHARM_KEY - ) - harness.update_relation_data( - peer_cluster_relation_id, harness.charm.app.name, {"roles": "broker,balancer"} - ) + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) + + # Then + # Default roles value does not include balancer + assert reporters_config_value not in charm.broker.config_manager.server_properties + + # Given + + cluster_peer = PeerRelation(PEER, PEER) + cluster_peer_cluster = Relation( + PEER_CLUSTER_ORCHESTRATOR_RELATION, "peer-cluster", remote_app_data={"roles": "balancer"} + ) + state_in = dataclasses.replace(base_state, relations=[cluster_peer, cluster_peer_cluster]) + + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) - assert reporters_config_value in harness.charm.broker.config_manager.server_properties + # Then + assert reporters_config_value in charm.broker.config_manager.server_properties diff --git a/tests/unit/test_health.py 
b/tests/unit/test_health.py index 369027b8..0ec882bc 100644 --- a/tests/unit/test_health.py +++ b/tests/unit/test_health.py @@ -1,50 +1,67 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. +import json import logging from pathlib import Path +from typing import cast from unittest.mock import mock_open, patch import pytest import yaml -from ops.testing import Harness +from ops.testing import Container, Context, State from charm import KafkaCharm -from literals import CHARM_KEY, JVM_MEM_MAX_GB, JVM_MEM_MIN_GB, SUBSTRATE - -logger = logging.getLogger(__name__) +from literals import ( + CONTAINER, + JVM_MEM_MAX_GB, + JVM_MEM_MIN_GB, + SUBSTRATE, +) pytestmark = [ pytest.mark.broker, pytest.mark.skipif(SUBSTRATE == "k8s", reason="health checks not used on K8s"), ] -CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) -ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) -METADATA = str(yaml.safe_load(Path("./metadata.yaml").read_text())) + +logger = logging.getLogger(__name__) -@pytest.fixture -def harness(): - harness = Harness(KafkaCharm, meta=METADATA, actions=ACTIONS, config=CONFIG) - harness.add_relation("restart", CHARM_KEY) - harness._update_config( - { - "log_retention_ms": "-1", - "compression_type": "producer", - } - ) - harness.begin() - storage_metadata = getattr(harness.charm, "meta").storages["data"] - min_storages = storage_metadata.multiple_range[0] if storage_metadata.multiple_range else 0 - with harness.hooks_disabled(): - harness.add_storage(storage_name="data", count=min_storages, attach=True) +CONFIG = yaml.safe_load(Path("./config.yaml").read_text()) +ACTIONS = yaml.safe_load(Path("./actions.yaml").read_text()) +METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) + + +@pytest.fixture() +def charm_configuration(): + """Enable direct mutation on configuration dict.""" + return json.loads(json.dumps(CONFIG)) - return harness +@pytest.fixture() +def base_state(): + if SUBSTRATE == "k8s": + state = State(leader=True, containers=[Container(name=CONTAINER, can_connect=True)]) -def test_service_pid(harness: Harness[KafkaCharm]): + else: + state = State(leader=True) + + return state + + +@pytest.fixture() +def ctx() -> Context: + ctx = Context(KafkaCharm, meta=METADATA, config=CONFIG, actions=ACTIONS, unit_id=0) + return ctx + + +def test_service_pid(ctx: Context, base_state: State) -> None: + # Given + state_in = base_state + + # When with ( patch( "builtins.open", @@ -52,19 +69,31 @@ def test_service_pid(harness: Harness[KafkaCharm]): read_data="0::/system.slice/snap.charmed-kafka.daemon.service", ), patch("subprocess.check_output", return_value="1314231"), + ctx(ctx.on.config_changed(), state_in) as manager, ): - assert harness.charm.broker.health._service_pid == 1314231 + charm = cast(KafkaCharm, manager.charm) + + # Then + assert charm.broker.health._service_pid == 1314231 -def test_check_vm_swappiness(harness: Harness[KafkaCharm]): +def test_check_vm_swappiness(ctx: Context, base_state: State) -> None: + # Given + state_in = base_state + + # When with ( patch("health.KafkaHealth._get_vm_swappiness", return_value=5), patch("health.KafkaHealth._check_file_descriptors", return_value=True), patch("health.KafkaHealth._check_memory_maps", return_value=True), patch("health.KafkaHealth._check_total_memory", return_value=True), + ctx(ctx.on.config_changed(), state_in) as manager, ): - assert not harness.charm.broker.health._check_vm_swappiness() - assert not 
harness.charm.broker.health.machine_configured() + charm = cast(KafkaCharm, manager.charm) + + # Then + assert not charm.broker.health._check_vm_swappiness() + assert not charm.broker.health.machine_configured() @pytest.mark.parametrize("total_mem_kb", [5741156, 65741156]) @@ -72,27 +101,58 @@ def test_check_vm_swappiness(harness: Harness[KafkaCharm]): "profile,limit", [("testing", JVM_MEM_MIN_GB), ("production", JVM_MEM_MAX_GB)] ) def test_check_total_memory_testing_profile( - harness: Harness[KafkaCharm], total_mem_kb, profile, limit -): - harness._update_config({"profile": profile}) + charm_configuration: dict, base_state: State, total_mem_kb: int, profile: str, limit: int +) -> None: + # Given + charm_configuration["options"]["profile"]["default"] = profile + state_in = base_state + ctx = Context( + KafkaCharm, meta=METADATA, config=charm_configuration, actions=ACTIONS, unit_id=0 + ) + + # When + with ( + patch("workload.KafkaWorkload.read", return_value=[f"MemTotal: {total_mem_kb} kB"]), + ctx(ctx.on.config_changed(), state_in) as manager, + ): + charm = cast(KafkaCharm, manager.charm) - with patch("workload.KafkaWorkload.read", return_value=[f"MemTotal: {total_mem_kb} kB"]): + # Then if total_mem_kb / 1000000 <= limit: - assert not harness.charm.broker.health._check_total_memory() + assert not charm.broker.health._check_total_memory() else: - assert harness.charm.broker.health._check_total_memory() + assert charm.broker.health._check_total_memory() -def test_get_partitions_size(harness: Harness[KafkaCharm]): +def test_get_partitions_size(ctx: Context, base_state: State) -> None: + # Given example_log_dirs = 'Querying brokers for log directories information\nReceived log directory information from brokers 0\n{"version":1,"brokers":[{"broker":0,"logDirs":[{"logDir":"/var/snap/charmed-kafka/common/var/lib/kafka/data/0","error":null,"partitions":[{"partition":"NEW-TOPIC-2-4","size":394,"offsetLag":0,"isFuture":false},{"partition":"NEW-TOPIC-2-3","size":394,"offsetLag":0,"isFuture":false},{"partition":"NEW-TOPIC-2-2","size":392,"offsetLag":0,"isFuture":false},{"partition":"NEW-TOPIC-2-1","size":392,"offsetLag":0,"isFuture":false},{"partition":"NEW-TOPIC-2-0","size":393,"offsetLag":0,"isFuture":false}]}]}]}\n' + state_in = base_state - with patch("workload.KafkaWorkload.run_bin_command", return_value=example_log_dirs): - assert harness.charm.broker.health._get_partitions_size() == (5, 393) + # When + with ( + patch("workload.KafkaWorkload.run_bin_command", return_value=example_log_dirs), + ctx(ctx.on.config_changed(), state_in) as manager, + ): + charm = cast(KafkaCharm, manager.charm) + # Then + assert charm.broker.health._get_partitions_size() == (5, 393) -def test_check_file_descriptors_no_listeners(harness: Harness[KafkaCharm]): - with patch("workload.KafkaWorkload.run_bin_command") as patched_run_bin: - assert harness.charm.broker.health._check_file_descriptors() + +def test_check_file_descriptors_no_listeners(ctx: Context, base_state: State) -> None: + # Given + state_in = base_state + + # When + with ( + patch("workload.KafkaWorkload.run_bin_command") as patched_run_bin, + ctx(ctx.on.config_changed(), state_in) as manager, + ): + charm = cast(KafkaCharm, manager.charm) + + # Then + assert charm.broker.health._check_file_descriptors() assert patched_run_bin.call_count == 0 @@ -100,14 +160,24 @@ def test_check_file_descriptors_no_listeners(harness: Harness[KafkaCharm]): @pytest.mark.parametrize("fd", [True, False]) @pytest.mark.parametrize("swap", [True, False]) 
@pytest.mark.parametrize("mem", [True, False]) -def test_machine_configured_succeeds_and_fails(harness: Harness[KafkaCharm], mmap, fd, swap, mem): +def test_machine_configured_succeeds_and_fails( + ctx: Context, base_state: State, mmap: bool, fd: bool, swap: bool, mem: bool +) -> None: + # Given + state_in = base_state + + # When with ( patch("health.KafkaHealth._check_memory_maps", return_value=mmap), patch("health.KafkaHealth._check_file_descriptors", return_value=fd), patch("health.KafkaHealth._check_vm_swappiness", return_value=swap), patch("health.KafkaHealth._check_total_memory", return_value=mem), + ctx(ctx.on.config_changed(), state_in) as manager, ): + charm = cast(KafkaCharm, manager.charm) + + # Then if all([mmap, fd, swap, mem]): - assert harness.charm.broker.health.machine_configured() + assert charm.broker.health.machine_configured() else: - assert not harness.charm.broker.health.machine_configured() + assert not charm.broker.health.machine_configured() diff --git a/tests/unit/scenario/test_kraft.py b/tests/unit/test_kraft.py similarity index 98% rename from tests/unit/scenario/test_kraft.py rename to tests/unit/test_kraft.py index dd9e756a..4a5b27e1 100644 --- a/tests/unit/scenario/test_kraft.py +++ b/tests/unit/test_kraft.py @@ -11,7 +11,7 @@ import pytest import yaml from ops import ActiveStatus -from scenario import Container, Context, PeerRelation, Relation, State +from ops.testing import Container, Context, PeerRelation, Relation, State from charm import KafkaCharm from literals import ( diff --git a/tests/unit/test_provider.py b/tests/unit/test_provider.py index 79ec6369..715fa46a 100644 --- a/tests/unit/test_provider.py +++ b/tests/unit/test_provider.py @@ -2,51 +2,66 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. 
+import dataclasses import logging from pathlib import Path from unittest.mock import PropertyMock, patch import pytest import yaml -from ops.testing import Harness +from ops.testing import Container, Context, PeerRelation, Relation, Secret, State from charm import KafkaCharm -from literals import CHARM_KEY, CONTAINER, PEER, REL_NAME, SUBSTRATE - -logger = logging.getLogger(__name__) +from literals import ( + CONTAINER, + PEER, + REL_NAME, + SUBSTRATE, + ZK, +) pytestmark = pytest.mark.broker -CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) -ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) -METADATA = str(yaml.safe_load(Path("./metadata.yaml").read_text())) +logger = logging.getLogger(__name__) + + +CONFIG = yaml.safe_load(Path("./config.yaml").read_text()) +ACTIONS = yaml.safe_load(Path("./actions.yaml").read_text()) +METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) -@pytest.fixture -def harness(): - harness = Harness(KafkaCharm, meta=METADATA, actions=ACTIONS, config=CONFIG) +@pytest.fixture() +def base_state(): if SUBSTRATE == "k8s": - harness.set_can_connect(CONTAINER, True) - - harness.add_relation("restart", CHARM_KEY) - harness._update_config( - { - "log_retention_ms": "-1", - "compression_type": "producer", - "expose-external": "none", - } - ) + state = State(leader=True, containers=[Container(name=CONTAINER, can_connect=True)]) - harness.begin() - return harness + else: + state = State(leader=True) + return state -def test_client_relation_created_defers_if_not_ready(harness: Harness[KafkaCharm]): - """Checks event is deferred if not ready on clientrelationcreated hook.""" - with harness.hooks_disabled(): - harness.add_relation(PEER, CHARM_KEY) +@pytest.fixture() +def ctx() -> Context: + ctx = Context(KafkaCharm, meta=METADATA, config=CONFIG, actions=ACTIONS, unit_id=0) + return ctx + + +def test_client_relation_created_defers_if_not_ready(ctx: Context, base_state: State) -> None: + # Given + cluster_peer = PeerRelation(PEER, PEER) + zk_relation = Relation(ZK, ZK) + client_relation = Relation( + REL_NAME, + "app", + remote_app_data={"topic": "TOPIC", "extra-user-roles": "consumer,producer"}, + ) + state_in = dataclasses.replace( + base_state, relations=[cluster_peer, zk_relation, client_relation] + ) + + # When with ( patch( "events.broker.BrokerOperator.healthy", new_callable=PropertyMock, return_value=False @@ -54,26 +69,27 @@ def test_client_relation_created_defers_if_not_ready(harness: Harness[KafkaCharm patch("managers.auth.AuthManager.add_user") as patched_add_user, patch("ops.framework.EventBase.defer") as patched_defer, ): - harness.set_leader(True) - client_rel_id = harness.add_relation(REL_NAME, "app") - # update relation to trigger on_topic_requested event - harness.update_relation_data( - client_rel_id, - "app", - {"topic": "TOPIC", "extra-user-roles": "consumer,producer"}, - ) - - patched_add_user.assert_not_called() - patched_defer.assert_called() - - -def test_client_relation_created_adds_user(harness: Harness[KafkaCharm]): - """Checks if new users are added on clientrelationcreated hook.""" - with harness.hooks_disabled(): - harness.add_relation(PEER, CHARM_KEY) - harness.set_leader(True) - client_rel_id = harness.add_relation(REL_NAME, "app") + ctx.run(ctx.on.relation_changed(client_relation), state_in) + + # Then + patched_add_user.assert_not_called() + patched_defer.assert_called() + +def test_client_relation_created_adds_user(ctx: Context, base_state: State) -> None: + # Given + cluster_peer = PeerRelation(PEER, 
PEER) + zk_relation = Relation(ZK, ZK) + client_relation = Relation( + REL_NAME, + "app", + remote_app_data={"topic": "TOPIC", "extra-user-roles": "consumer,producer"}, + ) + state_in = dataclasses.replace( + base_state, relations=[cluster_peer, zk_relation, client_relation] + ) + + # When with ( patch( "events.broker.BrokerOperator.healthy", new_callable=PropertyMock, return_value=True @@ -82,21 +98,33 @@ def test_client_relation_created_adds_user(harness: Harness[KafkaCharm]): patch("workload.KafkaWorkload.run_bin_command"), patch("core.cluster.ZooKeeper.connect", new_callable=PropertyMock, return_value="yes"), ): - harness.update_relation_data( - client_rel_id, - "app", - {"topic": "TOPIC", "extra-user-roles": "consumer,producer"}, - ) + state_out = ctx.run(ctx.on.relation_changed(client_relation), state_in) - patched_add_user.assert_called_once() - assert harness.charm.state.cluster.relation_data.get(f"relation-{client_rel_id}") + # Then + patched_add_user.assert_called_once() + assert f"relation-{client_relation.id}" in next(iter(state_out.secrets)).tracked_content -def test_client_relation_broken_removes_user(harness: Harness[KafkaCharm]): +def test_client_relation_broken_removes_user(ctx: Context, base_state: State) -> None: """Checks if users are removed on clientrelationbroken hook.""" - with harness.hooks_disabled(): - harness.add_relation(PEER, CHARM_KEY) + # Given + cluster_peer = PeerRelation(PEER, PEER) + zk_relation = Relation(ZK, ZK) + client_relation = Relation( + REL_NAME, + "app", + remote_app_data={"topic": "TOPIC", "extra-user-roles": "consumer,producer"}, + ) + secret = Secret( + tracked_content={f"relation-{client_relation.id}": "password"}, + owner="app", + label="cluster.kafka-k8s.app" if SUBSTRATE == "k8s" else "cluster.kafka.app", + ) + state_in = dataclasses.replace( + base_state, relations=[cluster_peer, zk_relation, client_relation], secrets=[secret] + ) + # When with ( patch( "events.broker.BrokerOperator.healthy", new_callable=PropertyMock, return_value=True @@ -107,30 +135,32 @@ def test_client_relation_broken_removes_user(harness: Harness[KafkaCharm]): patch("workload.KafkaWorkload.run_bin_command"), patch("core.cluster.ZooKeeper.connect", new_callable=PropertyMock, return_value="yes"), ): - harness.set_leader(True) - client_rel_id = harness.add_relation(REL_NAME, "app") - harness.update_relation_data( - client_rel_id, - "app", - {"topic": "TOPIC", "extra-user-roles": "consumer,producer"}, - ) - - # validating username got added - assert harness.charm.state.cluster.relation_data.get(f"relation-{client_rel_id}") - - harness.remove_relation(client_rel_id) + state_out = ctx.run(ctx.on.relation_broken(client_relation), state_in) - # validating username got removed - assert not harness.charm.state.cluster.relation_data.get(f"relation-{client_rel_id}") - patched_remove_acls.assert_called_once() - patched_delete_user.assert_called_once() + # Then + patched_remove_acls.assert_called_once() + patched_delete_user.assert_called_once() + # validating username got removed, by removing the full secret + assert not state_out.secrets -def test_client_relation_joined_sets_necessary_relation_data(harness: Harness[KafkaCharm]): +def test_client_relation_joined_sets_necessary_relation_data( + ctx: Context, base_state: State +) -> None: """Checks if all needed provider relation data is set on clientrelationjoined hook.""" - with harness.hooks_disabled(): - harness.add_relation(PEER, CHARM_KEY) + # Given + cluster_peer = PeerRelation(PEER, PEER) + zk_relation = Relation(ZK, ZK) 
+ client_relation = Relation( + REL_NAME, + "app", + remote_app_data={"topic": "TOPIC", "extra-user-roles": "consumer,producer"}, + ) + state_in = dataclasses.replace( + base_state, relations=[cluster_peer, zk_relation, client_relation] + ) + # When with ( patch( "events.broker.BrokerOperator.healthy", new_callable=PropertyMock, return_value=True @@ -139,35 +169,23 @@ def test_client_relation_joined_sets_necessary_relation_data(harness: Harness[Ka patch("workload.KafkaWorkload.run_bin_command"), patch("core.models.ZooKeeper.uris", new_callable=PropertyMock, return_value="yes"), ): - harness.set_leader(True) - client_rel_id = harness.add_relation(REL_NAME, "app") - client_relation = harness.charm.model.relations[REL_NAME][0] - - harness.update_relation_data( - client_relation.id, "app", {"topic": "TOPIC", "extra-user-roles": "consumer"} - ) - harness.add_relation_unit(client_rel_id, "app/0") - assert sorted( - [ - "username", - "password", - "tls-ca", - "endpoints", - "data", - "zookeeper-uris", - "consumer-group-prefix", - "tls", - "topic", - ] - ) == sorted(client_relation.data[harness.charm.app].keys()) - - assert client_relation.data[harness.charm.app].get("tls", None) == "disabled" - assert client_relation.data[harness.charm.app].get("zookeeper-uris", None) == "yes" - assert ( - client_relation.data[harness.charm.app].get("username", None) - == f"relation-{client_rel_id}" - ) - assert ( - client_relation.data[harness.charm.app].get("consumer-group-prefix", None) - == f"relation-{client_rel_id}-" - ) + state_out = ctx.run(ctx.on.relation_changed(client_relation), state_in) + + # Then + relation_databag = state_out.get_relation(client_relation.id).local_app_data + assert not { + "username", + "password", + "tls-ca", + "endpoints", + "data", + "zookeeper-uris", + "consumer-group-prefix", + "tls", + "topic", + } - set(relation_databag.keys()) + + assert relation_databag.get("tls", None) == "disabled" + assert relation_databag.get("zookeeper-uris", None) == "yes" + assert relation_databag.get("username", None) == f"relation-{client_relation.id}" + assert relation_databag.get("consumer-group-prefix", None) == f"relation-{client_relation.id}-" diff --git a/tests/unit/test_structured_config.py b/tests/unit/test_structured_config.py index e6b68e54..c9c6b284 100644 --- a/tests/unit/test_structured_config.py +++ b/tests/unit/test_structured_config.py @@ -8,33 +8,65 @@ import pytest import yaml -from ops.testing import Harness +from ops.testing import Container, Context, State +from pydantic import ValidationError from charm import KafkaCharm -from literals import CHARM_KEY, CONTAINER, SUBSTRATE +from core.structured_config import CharmConfig +from literals import ( + CONTAINER, + SUBSTRATE, +) pytestmark = [pytest.mark.broker, pytest.mark.balancer] -CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) -ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) -METADATA = str(yaml.safe_load(Path("./metadata.yaml").read_text())) logger = logging.getLogger(__name__) -@pytest.fixture -def harness(): - harness = Harness(KafkaCharm, meta=METADATA, config=CONFIG, actions=ACTIONS) +CONFIG = yaml.safe_load(Path("./config.yaml").read_text()) +ACTIONS = yaml.safe_load(Path("./actions.yaml").read_text()) +METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) + +@pytest.fixture() +def base_state(): if SUBSTRATE == "k8s": - harness.set_can_connect(CONTAINER, True) + state = State(leader=True, containers=[Container(name=CONTAINER, can_connect=True)]) + + else: + state = 
State(leader=True) + + return state + + +@pytest.fixture() +def ctx() -> Context: + ctx = Context(KafkaCharm, meta=METADATA, config=CONFIG, actions=ACTIONS, unit_id=0) + return ctx + - harness.add_relation("restart", CHARM_KEY) - harness.begin() - return harness +def check_valid_values(field: str, accepted_values: Iterable) -> None: + """Check the correctness of the passed values for a field.""" + flat_config_options = { + option_name: mapping.get("default") for option_name, mapping in CONFIG["options"].items() + } + for value in accepted_values: + CharmConfig(**{**flat_config_options, **{field: value}}) + + +def check_invalid_values(field: str, erroneus_values: Iterable) -> None: + """Check the incorrectness of the passed values for a field.""" + flat_config_options = { + option_name: mapping.get("default") for option_name, mapping in CONFIG["options"].items() + } + for value in erroneus_values: + with pytest.raises(ValidationError) as excinfo: + CharmConfig(**{**flat_config_options, **{field: value}}) + assert field in excinfo.value.errors()[0]["loc"] -def test_config_parsing_parameters_integer_values(harness) -> None: +def test_config_parsing_parameters_integer_values() -> None: """Check that integer fields are parsed correctly.""" integer_fields = [ "log_flush_offset_checkpoint_interval_ms", @@ -47,59 +79,41 @@ def test_config_parsing_parameters_integer_values(harness) -> None: erroneus_values = [2147483648, -2147483649] valid_values = [42, 1000, 1] for field in integer_fields: - check_invalid_values(harness, field, erroneus_values) - check_valid_values(harness, field, valid_values) + check_invalid_values(field, erroneus_values) + check_valid_values(field, valid_values) -def check_valid_values( - _harness, field: str, accepted_values: Iterable, is_long_field=False -) -> None: - """Check the correctness of the passed values for a field.""" - for value in accepted_values: - _harness.update_config({field: value}) - assert _harness.charm.config[field] == value if not is_long_field else int(value) - - -def check_invalid_values(_harness, field: str, erroneus_values: Iterable) -> None: - """Check the incorrectness of the passed values for a field.""" - with _harness.hooks_disabled(): - for value in erroneus_values: - _harness.update_config({field: value}) - with pytest.raises(ValueError): - _ = _harness.charm.config[field] - - -def test_product_related_values(harness) -> None: +def test_product_related_values() -> None: """Test specific parameters for each field.""" # log_message_timestamp_type field erroneus_values = ["test-value", "CreateTimes", "foo", "bar"] - check_invalid_values(harness, "log_message_timestamp_type", erroneus_values) + check_invalid_values("log_message_timestamp_type", erroneus_values) accepted_values = ["CreateTime", "LogAppendTime"] - check_valid_values(harness, "log_message_timestamp_type", accepted_values) + check_valid_values("log_message_timestamp_type", accepted_values) # log_cleanup_policy field - check_invalid_values(harness, "log_cleanup_policy", erroneus_values) + check_invalid_values("log_cleanup_policy", erroneus_values) accepted_values = ["compact", "delete"] - check_valid_values(harness, "log_cleanup_policy", accepted_values) + check_valid_values("log_cleanup_policy", accepted_values) # compression_type field - check_invalid_values(harness, "compression_type", erroneus_values) + check_invalid_values("compression_type", erroneus_values) accepted_values = ["gzip", "snappy", "lz4", "zstd", "uncompressed", "producer"] - check_valid_values(harness, 
"compression_type", accepted_values) + check_valid_values("compression_type", accepted_values) -def test_values_gt_zero(harness) -> None: +def test_values_gt_zero() -> None: """Check fields greater than zero.""" gt_zero_fields = ["log_flush_interval_messages", "log_flush_interval_ms"] erroneus_values = map(str, [0, -2147483649, -34]) valid_values = map(str, [42, 1000, 1, 9223372036854775807]) for field in gt_zero_fields: - check_invalid_values(harness, field, erroneus_values) - check_valid_values(harness, field, valid_values, is_long_field=True) + check_invalid_values(field, erroneus_values) + check_valid_values(field, valid_values) -def test_values_gteq_zero(harness) -> None: +def test_values_gteq_zero() -> None: """Check fields greater or equal than zero.""" gteq_zero_fields = [ "replication_quota_window_num", @@ -109,27 +123,23 @@ def test_values_gteq_zero(harness) -> None: erroneus_values = [-2147483649, -34] valid_values = [42, 1000, 1, 0] for field in gteq_zero_fields: - check_invalid_values(harness, field, erroneus_values) - check_valid_values(harness, field, valid_values) + check_invalid_values(field, erroneus_values) + check_valid_values(field, valid_values) -def test_values_in_specific_intervals(harness) -> None: +def test_values_in_specific_intervals() -> None: """Check fields on predefined intervals.""" # "log_cleaner_delete_retention_ms" erroneus_values = map(str, [-1, 0, 1000 * 60 * 60 * 24 * 90 + 1]) valid_values = map(str, [42, 1000, 10000, 1, 1000 * 60 * 60 * 24 * 90]) - check_invalid_values(harness, "log_cleaner_delete_retention_ms", erroneus_values) - check_valid_values( - harness, "log_cleaner_delete_retention_ms", valid_values, is_long_field=True - ) + check_invalid_values("log_cleaner_delete_retention_ms", erroneus_values) + check_valid_values("log_cleaner_delete_retention_ms", valid_values) # "log_cleaner_min_compaction_lag_ms" erroneus_values = map(str, [-1, 1000 * 60 * 60 * 24 * 7 + 1]) valid_values = map(str, [42, 1000, 10000, 1, 1000 * 60 * 60 * 24 * 7]) - check_invalid_values(harness, "log_cleaner_min_compaction_lag_ms", erroneus_values) - check_valid_values( - harness, "log_cleaner_min_compaction_lag_ms", valid_values, is_long_field=True - ) + check_invalid_values("log_cleaner_min_compaction_lag_ms", erroneus_values) + check_valid_values("log_cleaner_min_compaction_lag_ms", valid_values) partititions_fields = [ "transaction_state_log_num_partitions", @@ -138,11 +148,11 @@ def test_values_in_specific_intervals(harness) -> None: erroneus_values = [10001, -1] valid_values = [42, 1000, 10000, 1] for field in partititions_fields: - check_invalid_values(harness, field, erroneus_values) - check_valid_values(harness, field, valid_values) + check_invalid_values(field, erroneus_values) + check_valid_values(field, valid_values) -def test_config_parsing_parameters_long_values(harness) -> None: +def test_config_parsing_parameters_long_values() -> None: """Check long fields are parsed correctly.""" long_fields = [ "log_flush_interval_messages", @@ -155,14 +165,23 @@ def test_config_parsing_parameters_long_values(harness) -> None: erroneus_values = map(str, [-9223372036854775808, 9223372036854775809]) valid_values = map(str, [42, 1000, 9223372036854775808]) for field in long_fields: - check_invalid_values(harness, field, erroneus_values) - check_valid_values(harness, field, valid_values, is_long_field=True) + check_invalid_values(field, erroneus_values) + check_valid_values(field, valid_values) -def test_incorrect_roles(harness): +def test_incorrect_roles(): erroneus_values = 
["", "something_else" "broker, something_else" "broker,balancer,"] valid_values = ["broker", "balancer", "balancer,broker", "broker, balancer "] - check_invalid_values(harness, "roles", erroneus_values) - for value in valid_values: - harness.update_config({"roles": value}) - assert harness.charm.config.roles + check_invalid_values("roles", erroneus_values) + check_valid_values("roles", valid_values) + + +def test_incorrect_extra_listeners(): + erroneus_values = [ + "missing.port", + "low.port:15000", + "high.port:60000", + "non.unique:30000,other.non.unique:30000", + "close.port:30000,other.close.port:30001", + ] + check_invalid_values("extra_listeners", erroneus_values) diff --git a/tests/unit/test_tls.py b/tests/unit/test_tls.py index 62db76d6..c5172af1 100644 --- a/tests/unit/test_tls.py +++ b/tests/unit/test_tls.py @@ -2,156 +2,167 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. +import dataclasses +import json +import logging import socket from pathlib import Path +from typing import cast from unittest.mock import PropertyMock, patch import pytest import yaml -from ops.model import ActiveStatus -from ops.testing import Harness +from ops.testing import Container, Context, PeerRelation, Relation, State from charm import KafkaCharm -from literals import CHARM_KEY, CONTAINER, PEER, SUBSTRATE, ZK +from literals import ( + CHARM_KEY, + CONTAINER, + PEER, + SUBSTRATE, +) pytestmark = pytest.mark.broker -CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text())) -ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text())) -METADATA = str(yaml.safe_load(Path("./metadata.yaml").read_text())) +logger = logging.getLogger(__name__) -@pytest.fixture -def harness(): - harness = Harness(KafkaCharm, meta=METADATA, actions=ACTIONS, config=CONFIG) +CONFIG = yaml.safe_load(Path("./config.yaml").read_text()) +ACTIONS = yaml.safe_load(Path("./actions.yaml").read_text()) +METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) + + +@pytest.fixture() +def base_state(): if SUBSTRATE == "k8s": - harness.set_can_connect(CONTAINER, True) - - harness.add_relation("restart", CHARM_KEY) - harness._update_config( - { - "log_retention_ms": "-1", - "compression_type": "producer", - "expose-external": "none", - } - ) - harness.begin() - - # Relate to ZK with tls enabled - zk_relation_id = harness.add_relation(ZK, CHARM_KEY) - harness.update_relation_data( - zk_relation_id, - harness.charm.app.name, - { - "database": "/kafka", - "chroot": "/kafka", - "username": "moria", - "password": "mellon", - "endpoints": "1.1.1.1,2.2.2.2", - "uris": "1.1.1.1:2181/kafka,2.2.2.2:2181/kafka", - "tls": "enabled", - }, - ) + state = State(leader=True, containers=[Container(name=CONTAINER, can_connect=True)]) - # Simulate data-integrator relation - client_relation_id = harness.add_relation("kafka-client", "app") - harness.update_relation_data(client_relation_id, "app", {"extra-user-roles": "admin,producer"}) - client_relation_id = harness.add_relation("kafka-client", "appii") - harness.update_relation_data( - client_relation_id, "appii", {"extra-user-roles": "admin,consumer"} - ) + else: + state = State(leader=True) - return harness + return state -def test_mtls_not_enabled_if_trusted_certificate_added_before_tls_relation( - harness: Harness[KafkaCharm], -): - # Create peer relation - peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") - harness.update_relation_data( - peer_relation_id, f"{CHARM_KEY}/0", {"private-address": 
"treebeard"} - ) +@pytest.fixture() +def charm_configuration(): + """Enable direct mutation on configuration dict.""" + return json.loads(json.dumps(CONFIG)) - harness.set_leader(True) - harness.add_relation("trusted-certificate", "tls-one") - assert not harness.charm.state.cluster.mtls_enabled +@pytest.fixture() +def ctx() -> Context: + ctx = Context(KafkaCharm, meta=METADATA, config=CONFIG, actions=ACTIONS, unit_id=0) + return ctx -def test_mtls_flag_added(harness: Harness[KafkaCharm]): - # Create peer relation - peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/1") - harness.update_relation_data( - peer_relation_id, f"{CHARM_KEY}/0", {"private-address": "treebeard"} +def test_mtls_not_enabled_if_trusted_certificate_added_before_tls_relation( + ctx: Context, base_state: State +) -> None: + # Given + cluster_peer = PeerRelation(PEER, PEER) + cert_relation = Relation("trusted-certificate", "tls-one") + state_in = dataclasses.replace(base_state, relations=[cluster_peer, cert_relation]) + + # When + state_out = ctx.run(ctx.on.relation_created(cert_relation), state_in) + + # Then + assert ( + state_out.get_relation(cluster_peer.id).local_app_data.get("mtls", "disabled") != "enabled" ) - harness.update_relation_data(peer_relation_id, CHARM_KEY, {"tls": "enabled"}) - harness.set_leader(True) - harness.add_relation("trusted-certificate", "tls-one") - assert harness.charm.state.cluster.mtls_enabled - assert isinstance(harness.charm.app.status, ActiveStatus) +def test_mtls_added(ctx: Context, base_state: State) -> None: + # Given + cluster_peer = PeerRelation( + PEER, + PEER, + local_app_data={"tls": "enabled"}, + local_unit_data={"private-address": "treebeard"}, + ) + cert_relation = Relation("trusted-certificate", "tls-one") + state_in = dataclasses.replace(base_state, relations=[cluster_peer, cert_relation]) + # Given + state_out = ctx.run(ctx.on.relation_created(cert_relation), state_in) -def test_extra_sans_config(harness: Harness[KafkaCharm]): - # Create peer relation - peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/0") - harness.update_relation_data( - peer_relation_id, f"{CHARM_KEY}/0", {"private-address": "treebeard"} + # Then + assert ( + state_out.get_relation(cluster_peer.id).local_app_data.get("mtls", "disabled") == "enabled" ) - manager = harness.charm.broker.tls_manager - harness._update_config({"certificate_extra_sans": ""}) - manager.config = harness.charm.config - assert manager._build_extra_sans() == [] +@pytest.mark.parametrize( + ["config_option", "extra_sans", "expected"], + [ + ("certificate_extra_sans", "", []), + ("certificate_extra_sans", "worker{unit}.com", ["worker0.com"]), + ( + "certificate_extra_sans", + "worker{unit}.com,{unit}.example", + ["worker0.com", "0.example"], + ), + ( + "extra_listeners", + "worker{unit}.com:30000,{unit}.example:40000,nonunit.domain.com:45000", + ["worker0.com", "0.example", "nonunit.domain.com"], + ), + ], +) +def test_extra_sans_config( + charm_configuration: dict, + base_state: State, + config_option: str, + extra_sans: str, + expected: list[str], +) -> None: + # Given + charm_configuration["options"][config_option]["default"] = extra_sans + cluster_peer = PeerRelation( + PEER, + PEER, + local_unit_data={"private-address": "treebeard"}, + ) + state_in = dataclasses.replace(base_state, relations=[cluster_peer]) + ctx = Context( + KafkaCharm, meta=METADATA, config=charm_configuration, actions=ACTIONS, 
unit_id=0 + ) - harness._update_config({"certificate_extra_sans": "worker{unit}.com"}) - manager.config = harness.charm.config - assert "worker0.com" in "".join(manager._build_extra_sans()) + # When + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) - harness._update_config({"certificate_extra_sans": "worker{unit}.com,{unit}.example"}) - manager.config = harness.charm.config - assert "worker0.com" in "".join(manager._build_extra_sans()) - assert "0.example" in "".join(manager._build_extra_sans()) + # Then + assert charm.broker.tls_manager._build_extra_sans() == expected - # verifying that sans can be built with both certificate_extra_sans and extra_listeners - harness._update_config( - { - "certificate_extra_sans": "", - "extra_listeners": "worker{unit}.com:30000,{unit}.example:40000,nonunit.domain.com:45000", - } + +def test_sans(charm_configuration: dict, base_state: State, patched_node_ip) -> None: + # Given + charm_configuration["options"]["certificate_extra_sans"]["default"] = "worker{unit}.com" + cluster_peer = PeerRelation( + PEER, + PEER, + local_unit_data={"private-address": "treebeard"}, ) - manager.config = harness.charm.config - assert manager._build_extra_sans - assert "worker0.com" in "".join(manager._build_extra_sans()) - assert "0.example" in "".join(manager._build_extra_sans()) - assert "nonunit.domain.com" in "".join(manager._build_extra_sans()) - - -def test_sans(harness: Harness[KafkaCharm], patched_node_ip): - # Create peer relation - peer_relation_id = harness.add_relation(PEER, CHARM_KEY) - harness.add_relation_unit(peer_relation_id, f"{CHARM_KEY}/0") - harness.update_relation_data( - peer_relation_id, f"{CHARM_KEY}/0", {"private-address": "treebeard"} + state_in = dataclasses.replace(base_state, relations=[cluster_peer]) + ctx = Context( + KafkaCharm, meta=METADATA, config=charm_configuration, actions=ACTIONS, unit_id=0 ) - - manager = harness.charm.broker.tls_manager - harness.update_config({"certificate_extra_sans": "worker{unit}.com"}) - manager.config = harness.charm.config - sock_dns = socket.getfqdn() + + # When if SUBSTRATE == "vm": - assert manager.build_sans() == { + with ctx(ctx.on.config_changed(), state_in) as manager: + charm = cast(KafkaCharm, manager.charm) + built_sans = charm.broker.tls_manager.build_sans() + + # Then + assert built_sans == { "sans_ip": ["treebeard"], "sans_dns": [f"{CHARM_KEY}/0", sock_dns, "worker0.com"], } + elif SUBSTRATE == "k8s": # NOTE previous k8s sans_ip like kafka-k8s-0.kafka-k8s-endpoints or binding pod address with ( @@ -161,13 +172,18 @@ def test_sans(harness: Harness[KafkaCharm], patched_node_ip): new_callable=PropertyMock, return_value="palantir", ), + ctx(ctx.on.config_changed(), state_in) as manager, ): - assert sorted(manager.build_sans()["sans_dns"]) == sorted( - [ - "kafka-k8s-0", - "kafka-k8s-0.kafka-k8s-endpoints", - sock_dns, - "worker0.com", - ] - ) - assert "palantir" in "".join(manager.build_sans()["sans_ip"]) + charm = cast(KafkaCharm, manager.charm) + built_sans = charm.broker.tls_manager.build_sans() + + # Then + assert sorted(built_sans["sans_dns"]) == sorted( + [ + "kafka-k8s-0", + "kafka-k8s-0.kafka-k8s-endpoints", + sock_dns, + "worker0.com", + ] + ) + assert "palantir" in "".join(built_sans["sans_ip"]) diff --git a/tests/unit/test_upgrade.py b/tests/unit/test_upgrade.py index 10042f9e..d4e6ba0c 100644 --- a/tests/unit/test_upgrade.py +++ b/tests/unit/test_upgrade.py @@ -2,27 +2,55 @@ # Copyright 2023 Canonical Ltd. 
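Editor's note: the new test_tls.py leans on two ops.testing (Scenario) idioms: ctx.run(event, state) for pure state-in/state-out assertions, and the "with ctx(event, state) as manager:" form when internals such as broker.tls_manager must be inspected. A minimal illustration of the second idiom, reusing the fixture names defined above (sketch only, not part of the patch):

# Illustrative sketch only -- not taken from the patch.
import dataclasses
from typing import cast

from ops.testing import Context, PeerRelation, State

from charm import KafkaCharm
from literals import PEER


def example_manager_idiom(ctx: Context, base_state: State) -> None:
    cluster_peer = PeerRelation(PEER, PEER, local_unit_data={"private-address": "treebeard"})
    state_in = dataclasses.replace(base_state, relations=[cluster_peer])

    # Entering the context prepares the charm without emitting the event yet,
    # so objects hanging off the charm can be inspected directly.
    with ctx(ctx.on.config_changed(), state_in) as manager:
        charm = cast(KafkaCharm, manager.charm)
        assert charm.broker.tls_manager is not None
        state_out = manager.run()  # emits config-changed and returns the output State

    assert state_out is not None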
diff --git a/tests/unit/test_upgrade.py b/tests/unit/test_upgrade.py
index 10042f9e..d4e6ba0c 100644
--- a/tests/unit/test_upgrade.py
+++ b/tests/unit/test_upgrade.py
@@ -2,27 +2,55 @@
 # Copyright 2023 Canonical Ltd.
 # See LICENSE file for licensing details.

+import dataclasses
+import json
 import logging
 from pathlib import Path
+from typing import cast
 from unittest.mock import MagicMock, PropertyMock, patch

 import pytest
 import yaml
 from charms.data_platform_libs.v0.upgrade import ClusterNotReadyError, DependencyModel
 from kazoo.client import KazooClient
-from ops.testing import Harness
+from ops.testing import ActionFailed, Container, Context, PeerRelation, State

 from charm import KafkaCharm
 from events.upgrade import KafkaDependencyModel
-from literals import CHARM_KEY, CONTAINER, DEPENDENCIES, PEER, SUBSTRATE, ZK
+from literals import CONTAINER, DEPENDENCIES, PEER, SUBSTRATE

 logger = logging.getLogger(__name__)

 pytestmark = pytest.mark.broker

-CONFIG = str(yaml.safe_load(Path("./config.yaml").read_text()))
-ACTIONS = str(yaml.safe_load(Path("./actions.yaml").read_text()))
-METADATA = str(yaml.safe_load(Path("./metadata.yaml").read_text()))
+
+CONFIG = yaml.safe_load(Path("./config.yaml").read_text())
+ACTIONS = yaml.safe_load(Path("./actions.yaml").read_text())
+METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
+
+
+@pytest.fixture()
+def charm_configuration():
+    """Enable direct mutation on configuration dict."""
+    return json.loads(json.dumps(CONFIG))
+
+
+@pytest.fixture()
+def base_state():
+
+    if SUBSTRATE == "k8s":
+        state = State(leader=True, containers=[Container(name=CONTAINER, can_connect=True)])
+
+    else:
+        state = State(leader=True)
+
+    return state
+
+
+@pytest.fixture()
+def ctx() -> Context:
+    ctx = Context(KafkaCharm, meta=METADATA, config=CONFIG, actions=ACTIONS, unit_id=0)
+    return ctx


 @pytest.fixture()
@@ -33,92 +61,80 @@ def upgrade_func() -> str:
     return "_on_upgrade_granted"


-@pytest.fixture
-def harness(zk_data):
-    harness = Harness(KafkaCharm, meta=METADATA, config=CONFIG, actions=ACTIONS)
-    harness.add_relation("restart", CHARM_KEY)
-    harness.add_relation("upgrade", CHARM_KEY)
-
-    if SUBSTRATE == "k8s":
-        harness.set_can_connect(CONTAINER, True)
-
-    peer_rel_id = harness.add_relation(PEER, CHARM_KEY)
-    zk_rel_id = harness.add_relation(ZK, ZK)
-    harness._update_config(
-        {
-            "log_retention_ms": "-1",
-            "compression_type": "producer",
-            "expose-external": "none",
-        }
-    )
-    harness.begin()
-    with harness.hooks_disabled():
-        harness.add_relation_unit(peer_rel_id, f"{CHARM_KEY}/0")
-        harness.update_relation_data(
-            peer_rel_id, f"{CHARM_KEY}/0", {"private-address": "000.000.000"}
-        )
-        harness.update_relation_data(zk_rel_id, ZK, zk_data)
+def test_pre_upgrade_check_raises_not_stable(ctx: Context, base_state: State) -> None:
+    # Given
+    state_in = base_state

-    return harness
+    # When
+    with ctx(ctx.on.config_changed(), state_in) as manager:
+        charm = cast(KafkaCharm, manager.charm)
+        with pytest.raises(ClusterNotReadyError):
+            charm.broker.upgrade.pre_upgrade_check()


-def test_pre_upgrade_check_raises_not_stable(harness: Harness[KafkaCharm]):
-    with pytest.raises(ClusterNotReadyError):
-        harness.charm.broker.upgrade.pre_upgrade_check()
+def test_pre_upgrade_check_succeeds(ctx: Context, base_state: State) -> None:
+    # Given
+    state_in = base_state


-def test_pre_upgrade_check_succeeds(harness: Harness[KafkaCharm]):
+    # When
     with (
         patch("events.broker.BrokerOperator.healthy", return_value=True),
         patch("events.upgrade.KafkaUpgrade._set_rolling_update_partition"),
+        ctx(ctx.on.config_changed(), state_in) as manager,
     ):
-        harness.charm.broker.upgrade.pre_upgrade_check()
+        charm = cast(KafkaCharm, manager.charm)
+
+        # Then
+        charm.broker.upgrade.pre_upgrade_check()


 @pytest.mark.skipif(SUBSTRATE == "k8s", reason="upgrade stack not used on K8s")
-def test_build_upgrade_stack(harness: Harness[KafkaCharm]):
-    with harness.hooks_disabled():
-        harness.add_relation_unit(harness.charm.state.peer_relation.id, f"{CHARM_KEY}/1")
-        harness.update_relation_data(
-            harness.charm.state.peer_relation.id,
-            f"{CHARM_KEY}/1",
-            {"private-address": "111.111.111"},
-        )
-        harness.add_relation_unit(harness.charm.state.peer_relation.id, f"{CHARM_KEY}/2")
-        harness.update_relation_data(
-            harness.charm.state.peer_relation.id,
-            f"{CHARM_KEY}/2",
-            {"private-address": "222.222.222"},
-        )
-
-    stack = harness.charm.broker.upgrade.build_upgrade_stack()
+def test_build_upgrade_stack(ctx: Context, base_state: State) -> None:
+    # Given
+    cluster_peer = PeerRelation(
+        PEER,
+        PEER,
+        local_unit_data={"private-address": "000.000.000"},
+        peers_data={1: {"private-address": "111.111.111"}, 2: {"private-address": "222.222.222"}},
+    )
+    state_in = dataclasses.replace(base_state, relations=[cluster_peer])

+    # When
+    with ctx(ctx.on.config_changed(), state_in) as manager:
+        charm = cast(KafkaCharm, manager.charm)
+        stack = charm.broker.upgrade.build_upgrade_stack()
+
+    # Then
     assert len(stack) == 3
     assert len(stack) == len(set(stack))


 @pytest.mark.parametrize("upgrade_stack", ([], [0]))
-def test_run_password_rotation_while_upgrading(harness: Harness[KafkaCharm], upgrade_stack):
-    harness.charm.broker.upgrade.upgrade_stack = upgrade_stack
-    harness.set_leader(True)
-
-    mock_event = MagicMock()
-    mock_event.params = {"username": "admin"}
+def test_run_password_rotation_while_upgrading(
+    ctx: Context, base_state: State, upgrade_stack
+) -> None:
+    # Given
+    state_in = base_state

+    # When
     with (
         patch(
             "events.broker.BrokerOperator.healthy", new_callable=PropertyMock, return_value=True
         ),
         patch("managers.auth.AuthManager.add_user"),
+        patch(
+            "charms.data_platform_libs.v0.upgrade.DataUpgrade.upgrade_stack",
+            new_callable=PropertyMock,
+            return_value=upgrade_stack,
+        ),
     ):
-        harness.charm.broker.action_events._set_password_action(mock_event)
+        if not upgrade_stack:
+            ctx.run(ctx.on.action("set-password", params={"username": "admin"}), state_in)

-    if not upgrade_stack:
-        mock_event.set_results.assert_called()
-    else:
-        mock_event.fail.assert_called_with(
-            f"Cannot set password while upgrading (upgrade_stack: {upgrade_stack})"
-        )
+        else:
+            with pytest.raises(ActionFailed, match="Cannot set password while upgrading"):
+                ctx.run(ctx.on.action("set-password", params={"username": "admin"}), state_in)


 def test_kafka_dependency_model():
@@ -129,11 +145,12 @@ def test_kafka_dependency_model():

 def test_upgrade_granted_sets_failed_if_zookeeper_dependency_check_fails(
-    harness: Harness[KafkaCharm], upgrade_func: str
+    ctx: Context, base_state: State, upgrade_func: str
 ):
-    with harness.hooks_disabled():
-        harness.set_leader(True)
+    # Given
+    state_in = base_state

+    # When
     with (
         patch.object(KazooClient, "start"),
         patch(
@@ -157,15 +174,25 @@ def test_upgrade_granted_sets_failed_if_zookeeper_dependency_check_fails(
             new_callable=PropertyMock,
             return_value=False,
         ),
+        patch(
+            "events.upgrade.KafkaUpgrade.set_unit_failed",
+        ) as patch_set_failed,
+        ctx(ctx.on.config_changed(), state_in) as manager,
     ):
+        charm = cast(KafkaCharm, manager.charm)
         mock_event = MagicMock()
-        getattr(harness.charm.broker.upgrade, upgrade_func)(mock_event)
+        getattr(charm.broker.upgrade, upgrade_func)(mock_event)

-    assert harness.charm.broker.upgrade.state == "failed"
+    # Then
+    assert patch_set_failed.call_count
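Editor's note: test_run_password_rotation_while_upgrading above asserts the failure path through ops.testing.ActionFailed rather than through a mocked event's fail() call. Reduced to its essentials, that pattern looks like the sketch below (illustrative only; how successful action results are inspected afterwards depends on the ops-scenario version in use and is not shown in the patch):

# Illustrative sketch only -- not taken from the patch.
import pytest

from ops.testing import ActionFailed, Context, State


def example_action_failure(ctx: Context, base_state: State) -> None:
    # A failed action surfaces as ActionFailed, carrying the message the charm
    # passed to event.fail().
    with pytest.raises(ActionFailed, match="Cannot set password while upgrading"):
        ctx.run(ctx.on.action("set-password", params={"username": "admin"}), base_state)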
 @pytest.mark.skipif(SUBSTRATE == "k8s", reason="Upgrade granted not used on K8s charms")
-def test_upgrade_granted_sets_failed_if_failed_snap(harness: Harness[KafkaCharm]):
+def test_upgrade_granted_sets_failed_if_failed_snap(ctx: Context, base_state: State) -> None:
+    # Given
+    state_in = base_state
+
+    # When
     with (
         patch(
             "events.upgrade.KafkaUpgrade.zookeeper_current_version",
@@ -175,17 +202,27 @@ def test_upgrade_granted_sets_failed_if_failed_snap(harness: Harness[KafkaCharm]
         patch("workload.KafkaWorkload.stop") as patched_stop,
         patch("workload.BalancerWorkload.stop"),
         patch("workload.KafkaWorkload.install", return_value=False),
+        patch(
+            "events.upgrade.KafkaUpgrade.set_unit_failed",
+        ) as patch_set_failed,
+        ctx(ctx.on.config_changed(), state_in) as manager,
     ):
+        charm = cast(KafkaCharm, manager.charm)
         mock_event = MagicMock()
-        harness.charm.broker.upgrade._on_upgrade_granted(mock_event)
+        charm.broker.upgrade._on_upgrade_granted(mock_event)

+    # Then
     patched_stop.assert_called_once()
-    assert harness.charm.broker.upgrade.state == "failed"
+    assert patch_set_failed.call_count


 def test_upgrade_sets_failed_if_failed_upgrade_check(
-    harness: Harness[KafkaCharm], upgrade_func: str
-):
+    ctx: Context, base_state: State, upgrade_func: str
+) -> None:
+    # Given
+    state_in = base_state
+
+    # When
     with (
         patch(
             "core.models.ZooKeeper.zookeeper_version",
@@ -215,15 +252,25 @@ def test_upgrade_sets_failed_if_failed_upgrade_check(
             new_callable=PropertyMock,
             return_value=False,
         ),
+        patch(
+            "events.upgrade.KafkaUpgrade.set_unit_failed",
+        ) as patch_set_failed,
+        ctx(ctx.on.config_changed(), state_in) as manager,
     ):
+        charm = cast(KafkaCharm, manager.charm)
         mock_event = MagicMock()
-        getattr(harness.charm.broker.upgrade, upgrade_func)(mock_event)
+        getattr(charm.broker.upgrade, upgrade_func)(mock_event)

+    # Then
     assert patched_restart.call_count or patched_start.call_count
-    assert harness.charm.broker.upgrade.state == "failed"
+    assert patch_set_failed.call_count


-def test_upgrade_succeeds(harness: Harness[KafkaCharm], upgrade_func: str):
+def test_upgrade_succeeds(ctx: Context, base_state: State, upgrade_func: str) -> None:
+    # Given
+    state_in = base_state
+
+    # When
     with (
         patch(
             "core.models.ZooKeeper.zookeeper_version",
@@ -258,19 +305,27 @@ def test_upgrade_succeeds(harness: Harness[KafkaCharm], upgrade_func: str):
             "core.models.ZooKeeper.broker_active",
             return_value=True,
         ),
+        patch(
+            "events.upgrade.KafkaUpgrade.set_unit_completed",
+        ) as patch_set_completed,
+        ctx(ctx.on.config_changed(), state_in) as manager,
     ):
+        charm = cast(KafkaCharm, manager.charm)
         mock_event = MagicMock()
-        getattr(harness.charm.broker.upgrade, upgrade_func)(mock_event)
+        getattr(charm.broker.upgrade, upgrade_func)(mock_event)

     assert patched_restart.call_count or patched_start.call_count
-    assert harness.charm.broker.upgrade.state == "completed"
+    assert patch_set_completed.call_count


 @pytest.mark.skipif(SUBSTRATE == "k8s", reason="Upgrade granted not used on K8s charms")
-def test_upgrade_granted_recurses_upgrade_changed_on_leader(harness: Harness[KafkaCharm]):
-    with harness.hooks_disabled():
-        harness.set_leader(True)
+def test_upgrade_granted_recurses_upgrade_changed_on_leader(
+    ctx: Context, base_state: State
+) -> None:
+    # Given
+    state_in = base_state

+    # When
     with (
         patch(
             "events.upgrade.KafkaUpgrade.zookeeper_current_version",
@@ -285,9 +340,12 @@ def test_upgrade_granted_recurses_upgrade_changed_on_leader(harness: Harness[Kaf
             "events.broker.BrokerOperator.healthy", new_callable=PropertyMock, return_value=True
         ),
         patch("workload.BalancerWorkload.stop"),
-        patch("events.upgrade.KafkaUpgrade.on_upgrade_changed") as patched_upgrade,
+        patch("events.upgrade.KafkaUpgrade.on_upgrade_changed", autospec=True) as patched_upgrade,
+        ctx(ctx.on.config_changed(), state_in) as manager,
     ):
+        charm = cast(KafkaCharm, manager.charm)
         mock_event = MagicMock()
-        harness.charm.broker.upgrade._on_upgrade_granted(mock_event)
+        charm.broker.upgrade._on_upgrade_granted(mock_event)

+    # Then
     patched_upgrade.assert_called_once()
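Editor's note: the final hunk adds autospec=True when patching KafkaUpgrade.on_upgrade_changed. With autospec the replacement mock enforces the real method's signature (including self), so a badly formed recursive call fails loudly instead of being silently recorded by a permissive MagicMock. A generic illustration, not specific to this charm:

# Illustrative sketch only -- not taken from the patch.
from unittest.mock import patch


class Upgrader:
    def on_upgrade_changed(self, event) -> None: ...


def example_autospec_behaviour() -> None:
    upgrader = Upgrader()

    with patch.object(Upgrader, "on_upgrade_changed", autospec=True) as mocked:
        upgrader.on_upgrade_changed("event")  # recorded; signature-checked
        try:
            upgrader.on_upgrade_changed("event", "extra")  # too many args for the spec
        except TypeError:
            pass

    # Only the well-formed call was recorded.
    assert mocked.call_count == 1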