diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 51efbf1fd8..862f5edd8b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -90,14 +90,10 @@ jobs: - name: Run tests run: cargo nextest run --exclude zenoh-examples --exclude zenoh-plugin-example --workspace - env: - ASYNC_STD_THREAD_COUNT: 4 - name: Run tests with SHM if: ${{ matrix.os == 'ubuntu-latest' }} run: cargo nextest run -F shared-memory -F transport_unixpipe -p zenoh-transport - env: - ASYNC_STD_THREAD_COUNT: 4 - name: Check for feature leaks if: ${{ matrix.os == 'ubuntu-latest' }} @@ -105,8 +101,6 @@ jobs: - name: Run doctests run: cargo test --doc - env: - ASYNC_STD_THREAD_COUNT: 4 # NOTE: In GitHub repository settings, the "Require status checks to pass # before merging" branch protection rule ensures that commits are only merged diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8c364ca881..b1463398ed 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -160,22 +160,373 @@ jobs: linux/amd64 secrets: inherit - ghcr: - name: Publish container image to GitHub Container Registry - needs: [tag, build-standalone] - uses: eclipse-zenoh/ci/.github/workflows/release-crates-ghcr.yml@main - with: - no-build: true - live-run: ${{ inputs.live-run }} - version: ${{ needs.tag.outputs.version }} - repo: ${{ github.repository }} - tags: "${{ github.repository }}:${{ needs.tag.outputs.version }}" - binary: zenohd - files: | - zenohd - libzenoh_plugin_rest.so - libzenoh_plugin_storage_manager.so - platforms: | - linux/arm64 - linux/amd64 - secrets: inherit + ZENOH_VERSION=$(cargo metadata --no-deps --format-version 1 | jq -r '.packages[] | select(.name == "zenoh") | .version') + echo "ZENOH_VERSION=${ZENOH_VERSION}" >> $GITHUB_OUTPUT + if [ -n "${GIT_TAG}" ]; then + IS_RELEASE="true" + echo "IS_RELEASE=${IS_RELEASE}" >> $GITHUB_OUTPUT + PKG_VERSION=${ZENOH_VERSION} + elif [ -n "${GIT_BRANCH}" ]; then + 
PKG_VERSION=${GIT_BRANCH}-${GITHUB_SHA:0:8} + else + PKG_VERSION=${ZENOH_VERSION}-${GITHUB_SHA:0:8} + fi + echo "PKG_VERSION=${PKG_VERSION}" >> $GITHUB_OUTPUT + echo "GITHUB_OUTPUT : ${GITHUB_OUTPUT}" + outputs: + GIT_BRANCH: ${{ steps.env.outputs.GIT_BRANCH }} + GIT_TAG: ${{ steps.env.outputs.GIT_TAG }} + IS_RELEASE: ${{ steps.env.outputs.IS_RELEASE }} + ZENOH_VERSION: ${{ steps.env.outputs.ZENOH_VERSION }} + PKG_VERSION: ${{ steps.env.outputs.PKG_VERSION }} + + tests: + name: Tests + if: ${{ !(github.event.inputs.runtests == 'false') }} + needs: checks + runs-on: ubuntu-latest + steps: + - name: Clone this repository + uses: actions/checkout@v4 + + - name: Install Rust toolchain + run: rustup show + + - name: Install nextest + run: cargo install --version ${{ env.VERSION_CARGO_NEXTEST }} --locked cargo-nextest + env: + CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse + + - name: Run tests + run: cargo nextest run --release --features=${{ github.event.inputs.features}} --verbose + env: + CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse + + - name: Run doctests + run: cargo test --release --features=${{ github.event.inputs.features}} --doc + env: + CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse + + doc: + name: Doc generation + if: ${{ !(github.event.inputs.doc == 'false') }} + needs: checks + runs-on: ubuntu-latest + steps: + - name: Clone this repository + uses: actions/checkout@v4 + + # Use a similar command than docs.rs build: rustdoc with nightly toolchain + - name: Install Rust toolchain nightly for docs gen + run: rustup toolchain install nightly + + - name: generate doc + # NOTE: force 'unstable' feature for doc generation, as forced for docs.rs build in zenoh/Cargo.toml + run: > + cargo +nightly rustdoc --manifest-path ./zenoh/Cargo.toml --lib --features unstable -j3 + -Z rustdoc-map -Z unstable-options -Z rustdoc-scrape-examples + --config build.rustdocflags='["-Z", "unstable-options", "--emit=invocation-specific", "--cap-lints", "warn", "--disable-per-crate-search", 
"--extern-html-root-takes-precedence"]' + env: + RUSTDOCFLAGS: -Dwarnings + + builds: + name: Build for ${{ matrix.job.target }} on ${{ matrix.job.os }} + if: ${{ !(github.event.inputs.build == 'false') }} + needs: checks + runs-on: ${{ matrix.job.os }} + strategy: + fail-fast: false + matrix: + job: + - { + target: x86_64-unknown-linux-gnu, + arch: amd64, + os: ubuntu-20.04, + build-cmd: "cargo", + } + - { + target: x86_64-unknown-linux-musl, + arch: amd64, + os: ubuntu-20.04, + build-cmd: "cross", + } + - { + target: arm-unknown-linux-gnueabi, + arch: armel, + os: ubuntu-20.04, + build-cmd: "cross", + } + - { + target: arm-unknown-linux-gnueabihf, + arch: armhf, + os: ubuntu-20.04, + build-cmd: "cross", + } + - { + target: armv7-unknown-linux-gnueabihf, + arch: armhf, + os: ubuntu-20.04, + build-cmd: "cross", + } + - { + target: aarch64-unknown-linux-gnu, + arch: arm64, + os: ubuntu-20.04, + build-cmd: "cross", + } + - { + target: aarch64-unknown-linux-musl, + arch: arm64, + os: ubuntu-20.04, + build-cmd: "cross", + } + - { + target: x86_64-apple-darwin, + arch: darwin, + os: macos-latest, + build-cmd: "cargo", + } + - { + target: aarch64-apple-darwin, + arch: darwin, + os: macos-latest, + build-cmd: "cargo", + } + - { + target: x86_64-pc-windows-msvc, + arch: win64, + os: windows-2019, + build-cmd: "cargo", + } + # - { target: x86_64-pc-windows-gnu , arch: win64 , os: windows-2019 } + steps: + - name: Checkout source code + uses: actions/checkout@v4 + with: + fetch-depth: 500 # NOTE: get long history for git-version crate to correctly compute a version + + - name: Fetch Git tags # NOTE: workaround for https://github.com/actions/checkout/issues/290 + shell: bash + run: git fetch --tags --force + + - name: Install prerequisites + shell: bash + run: | + case ${{ matrix.job.target }} in + *-linux-gnu*) cargo install --version ${{ env.VERSION_CARGO_DEB }} cargo-deb ;; + esac + + case ${{ matrix.job.target }} in + arm-unknown-linux-gnueabi) + sudo apt-get -y update + 
sudo apt-get -y install gcc-arm-linux-gnueabi + ;; + arm*-unknown-linux-gnueabihf) + sudo apt-get -y update + sudo apt-get -y install gcc-arm-linux-gnueabihf + ;; + aarch64-unknown-linux-gnu) + sudo apt-get -y update + sudo apt-get -y install gcc-aarch64-linux-gnu + ;; + esac + + cargo install --version ${{ env.VERSION_CROSS }} cross + + - name: Install Rust toolchain + run: | + rustup show + rustup target add ${{ matrix.job.target }} + + - name: Build + run: ${{ matrix.job.build-cmd }} build --release --bins --lib --features=${{ github.event.inputs.features}} --target=${{ matrix.job.target }} + + - name: Debian package - zenohd + if: contains(matrix.job.target, '-linux-gnu') + run: cargo deb --no-build --target=${{ matrix.job.target }} -p zenohd + env: + CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse + + - name: Debian package - zenoh-plugin-storage-manager + if: contains(matrix.job.target, '-linux-gnu') + run: cargo deb --no-build --target=${{ matrix.job.target }} -p zenoh-plugin-storage-manager + env: + CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse + + - name: Debian package - zenoh-plugin-rest + if: contains(matrix.job.target, '-linux-gnu') + run: cargo deb --no-build --target=${{ matrix.job.target }} -p zenoh-plugin-rest + env: + CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse + + - name: Packaging + id: package + shell: bash + run: | + TARGET=${{ matrix.job.target }} + MAIN_PKG_NAME="${GITHUB_WORKSPACE}/zenoh-${{ needs.checks.outputs.PKG_VERSION }}-${TARGET}.zip" + DEBS_PKG_NAME="${GITHUB_WORKSPACE}/zenoh-${{ needs.checks.outputs.PKG_VERSION }}-${TARGET}-deb-pkgs.zip" + + case ${TARGET} in + *linux*) + cd "target/${TARGET}/release/" + echo "Packaging ${MAIN_PKG_NAME}:" + zip ${MAIN_PKG_NAME} zenohd libzenoh_plugin_*.so + cd - + echo "MAIN_PKG_NAME=${MAIN_PKG_NAME}" >> $GITHUB_OUTPUT + + # check if debian packages has been created and packages them in a single tgz + if [[ -d target/${TARGET}/debian ]]; then + cd target/${TARGET}/debian + echo "Packaging 
${DEBS_PKG_NAME}:" + zip ${DEBS_PKG_NAME} *.deb + cd - + echo "DEBS_PKG_NAME=${DEBS_PKG_NAME}" >> $GITHUB_OUTPUT + fi + ;; + *apple*) + cd "target/${TARGET}/release/" + echo "Packaging ${MAIN_PKG_NAME}:" + zip ${MAIN_PKG_NAME} zenohd libzenoh_plugin_*.dylib + cd - + echo "MAIN_PKG_NAME=${MAIN_PKG_NAME}" >> $GITHUB_OUTPUT + ;; + *windows*) + cd "target/${TARGET}/release/" + echo "Packaging ${MAIN_PKG_NAME}:" + 7z -y a "${MAIN_PKG_NAME}" zenohd.exe zenoh_plugin_*.dll + cd - + echo "MAIN_PKG_NAME=${MAIN_PKG_NAME}" >> $GITHUB_OUTPUT + ;; + esac + + - name: "Upload packages" + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.job.target }} + path: | + ${{ steps.package.outputs.MAIN_PKG_NAME }} + ${{ steps.package.outputs.DEBS_PKG_NAME }} + + publication: + name: Release publication + if: ${{ (needs.checks.outputs.IS_RELEASE == 'true' || github.event.inputs.publish == 'true') && !failure() }} + needs: [checks, builds, tests, doc] + runs-on: ubuntu-latest + steps: + - name: Download result of previous builds + uses: actions/download-artifact@v3 + with: + path: ARTIFACTS + + - name: Publish as github release + if: ${{ !(github.event.inputs.githubrelease == 'false') }} + uses: softprops/action-gh-release@v1 + with: + files: ARTIFACTS/*/*.* + + - name: Publish to download.eclipse.org/zenoh + if: ${{ !(github.event.inputs.eclipse == 'false') }} + env: + SSH_TARGET: genie.zenoh@projects-storage.eclipse.org + ECLIPSE_BASE_DIR: /home/data/httpd/download.eclipse.org/zenoh + shell: bash + run: | + echo "--- setup ssh-agent" + eval "$(ssh-agent -s)" + echo 'echo "${{ secrets.SSH_PASSPHRASE }}"' > ~/.ssh_askpass && chmod +x ~/.ssh_askpass + echo "${{ secrets.SSH_PRIVATE_KEY }}" | tr -d '\r' | DISPLAY=NONE SSH_ASKPASS=~/.ssh_askpass ssh-add - > /dev/null 2>&1 + rm -f ~/.ssh_askpass + echo "--- test ssh:" + ssh -o "StrictHostKeyChecking=no" ${SSH_TARGET} ls -al + echo "---- list artifacts to upload:" + ls -R ARTIFACTS || true + DOWNLOAD_DIR=${ECLIPSE_BASE_DIR}/zenoh/${{ 
needs.checks.outputs.ZENOH_VERSION }} + echo "---- copy artifacts into ${DOWNLOAD_DIR}" + ssh -o "StrictHostKeyChecking=no" ${SSH_TARGET} mkdir -p ${DOWNLOAD_DIR} + cd ARTIFACTS + sha256sum */* > sha256sums.txt + scp -o "StrictHostKeyChecking=no" -r * ${SSH_TARGET}:${DOWNLOAD_DIR}/ + echo "---- cleanup identity" + ssh-add -D + + - name: Checkout this repository + uses: actions/checkout@v4 + + - name: Install Rust toolchain + if: ${{ !(github.event.inputs.cratesio == 'false') }} + run: rustup show + + - name: Check crates + if: ${{ !(github.event.inputs.cratesio == 'false') }} + shell: bash + run: .github/workflows/crates_check.sh + + - name: Publish to crates.io + if: ${{ !(github.event.inputs.cratesio == 'false') }} + shell: bash + run: | + set +x + .github/workflows/crates_publish.sh ${{ secrets.CRATES_IO_TOKEN }} + + - name: Cancel workflow if fail # thus Docker job be interrupted + if: failure() + uses: andymckay/cancel-action@0.2 + + docker: + name: Docker build and push + if: ${{ !(github.event.inputs.dockerhub == 'false') && !failure() }} + needs: [checks, builds, tests, doc] + runs-on: ubuntu-latest + steps: + - name: Checkout this repository + uses: actions/checkout@v4 + with: + fetch-depth: 500 # NOTE: get long history for git-version crate to correctly compute a version + + - name: Fetch Git tags # NOTE: workaround for https://github.com/actions/checkout/issues/290 + shell: bash + run: git fetch --tags --force + + - name: Download packages from previous job + uses: actions/download-artifact@v3 + with: + path: PACKAGES + + - name: Unzip PACKAGES + run: | + ls PACKAGES + mkdir -p docker/linux/amd + unzip PACKAGES/x86_64-unknown-linux-musl/zenoh-${{ needs.checks.outputs.PKG_VERSION }}-x86_64-unknown-linux-musl.zip -d docker/linux/amd64/ + rm docker/linux/amd64/libzenoh_plugin_example.so + mkdir -p docker/linux/arm64 + unzip PACKAGES/aarch64-unknown-linux-musl/zenoh-${{ needs.checks.outputs.PKG_VERSION }}-aarch64-unknown-linux-musl.zip -d docker/linux/arm64/ 
+ rm docker/linux/arm64/libzenoh_plugin_example.so + tree docker + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Docker meta - set tags and labels + id: meta + uses: docker/metadata-action@v5 + with: + images: eclipse/zenoh + labels: | + org.opencontainers.image.licenses=EPL-2.0 OR Apache-2.0 + + - name: Login to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_COM_USERNAME }} + password: ${{ secrets.DOCKER_COM_PASSWORD }} + + - name: Build and push + uses: docker/build-push-action@v5 + with: + context: . + push: true + platforms: linux/amd64, linux/arm64 + file: .github/workflows/Dockerfile + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} diff --git a/.gitignore b/.gitignore index 83421ea1ae..695d0464b1 100644 --- a/.gitignore +++ b/.gitignore @@ -19,4 +19,4 @@ .vscode -cargo-timing*.html \ No newline at end of file +cargo-timing*.html diff --git a/Cargo.lock b/Cargo.lock index 1d5fab2365..ba84d51ee8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -165,9 +165,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.4" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" +checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540" dependencies = [ "anstyle", "anstyle-parse", @@ -179,9 +179,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84bf0a05bbb2a83e5eb6fa36bb6e87baa08193c35ff52bbf6b38d8af2890e46" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" [[package]] name = "anstyle-parse" @@ -359,16 +359,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "async-rustls" -version = "0.4.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "29479d362e242e320fa8f5c831940a5b83c1679af014068196cd20d4bf497b6b" -dependencies = [ - "futures-io", - "rustls", -] - [[package]] name = "async-session" version = "2.0.1" @@ -1101,17 +1091,36 @@ version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +[[package]] +name = "encoding_rs" +version = "0.8.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "env_filter" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" +dependencies = [ + "log", + "regex", +] + [[package]] name = "env_logger" -version = "0.10.0" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" +checksum = "6c012a26a7f605efc424dd53697843a72be7dc86ad2d01f7814337794a12231d" dependencies = [ + "anstream", + "anstyle", + "env_filter", "humantime", - "is-terminal", "log", - "regex", - "termcolor", ] [[package]] @@ -1454,6 +1463,25 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "h2" +version = "0.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.9", + "indexmap 1.9.3", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "half" version = "1.8.2" @@ -1550,6 +1578,28 @@ dependencies = [ "itoa", ] +[[package]] +name = "http" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +dependencies = [ + "bytes", + "http 0.2.9", + "pin-project-lite 0.2.13", +] + [[package]] name = "http-client" version = "6.5.3" @@ -1596,6 +1646,30 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +[[package]] +name = "hyper" +version = "0.14.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http 0.2.9", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite 0.2.13", + "socket2 0.4.9", + "tokio", + "tower-service", + "tracing", + "want", +] + [[package]] name = "iana-time-zone" version = "0.1.57" @@ -1804,9 +1878,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.148" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libloading" @@ -1933,9 +2007,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" dependencies = [ "libc", "log", @@ -2469,7 +2543,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls", + "rustls 0.21.7", "thiserror", 
"tokio", "tracing", @@ -2485,7 +2559,7 @@ dependencies = [ "rand 0.8.5", "ring 0.16.20", "rustc-hash", - "rustls", + "rustls 0.21.7", "rustls-native-certs 0.6.3", "slab", "thiserror", @@ -2501,7 +2575,7 @@ checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" dependencies = [ "bytes", "libc", - "socket2 0.5.4", + "socket2 0.5.6", "tracing", "windows-sys 0.48.0", ] @@ -2678,6 +2752,40 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +[[package]] +name = "reqwest" +version = "0.11.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" +dependencies = [ + "base64 0.21.4", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http 0.2.9", + "http-body", + "hyper", + "ipnet", + "js-sys", + "log", + "mime", + "once_cell", + "percent-encoding", + "pin-project-lite 0.2.13", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg", +] + [[package]] name = "ring" version = "0.16.20" @@ -2814,6 +2922,20 @@ dependencies = [ "sct", ] +[[package]] +name = "rustls" +version = "0.22.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" +dependencies = [ + "log", + "ring 0.17.6", + "rustls-pki-types", + "rustls-webpki 0.102.2", + "subtle", + "zeroize", +] + [[package]] name = "rustls-native-certs" version = "0.6.3" @@ -2860,9 +2982,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.0.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb0a1f9b9efec70d32e6d6aa3e58ebd88c3754ec98dfe9145c63cf54cc829b83" +checksum = 
"5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8" [[package]] name = "rustls-webpki" @@ -2876,9 +2998,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.0" +version = "0.102.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de2635c8bc2b88d367767c5de8ea1d8db9af3f6219eba28442242d9ab81d1b89" +checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" dependencies = [ "ring 0.17.6", "rustls-pki-types", @@ -3254,12 +3376,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -3465,15 +3587,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "termcolor" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" -dependencies = [ - "winapi-util", -] - [[package]] name = "thiserror" version = "1.0.48" @@ -3618,9 +3731,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.32.0" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", "bytes", @@ -3628,22 +3741,43 @@ dependencies = [ "mio", "num_cpus", "pin-project-lite 0.2.13", - "socket2 0.5.4", + "socket2 0.5.6", "tokio-macros", "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", "syn 2.0.33", ] +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.7", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.2", + "rustls-pki-types", + "tokio", +] + [[package]] name = "tokio-serial" version = "5.4.4" @@ -3659,9 +3793,9 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2dbec703c26b00d74844519606ef15d09a7d6857860f84ad223dec002ddea2" +checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" dependencies = [ "futures-util", "log", @@ -3669,6 +3803,28 @@ dependencies = [ "tungstenite", ] +[[package]] +name = "tokio-util" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "futures-util", + "hashbrown 0.14.0", + "pin-project-lite 0.2.13", + "tokio", + "tracing", +] + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + [[package]] name = "tracing" version = "0.1.37" @@ -3704,14 +3860,14 @@ dependencies = [ [[package]] name = "tungstenite" -version = "0.20.1" +version = "0.21.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" +checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" dependencies = [ "byteorder", "bytes", "data-encoding", - "http", + "http 1.0.0", "httparse", "log", "rand 0.8.5", @@ -4309,8 +4465,6 @@ dependencies = [ name = "zenoh" version = "0.11.0-dev" dependencies = [ - "async-global-executor", - "async-std", "async-trait", "base64 0.21.4", "const_format", @@ -4330,8 +4484,10 @@ dependencies = [ "rustc_version 0.4.0", "serde", "serde_json", - "socket2 0.5.4", + "socket2 0.5.6", "stop-token", + "tokio", + "tokio-util", "uhlc", "uuid", "vec_map", @@ -4347,6 +4503,7 @@ dependencies = [ "zenoh-plugin-trait", "zenoh-protocol", "zenoh-result", + "zenoh-runtime", "zenoh-shm", "zenoh-sync", "zenoh-transport", @@ -4421,9 +4578,11 @@ dependencies = [ name = "zenoh-core" version = "0.11.0-dev" dependencies = [ - "async-std", + "async-global-executor", "lazy_static", + "tokio", "zenoh-result", + "zenoh-runtime", ] [[package]] @@ -4442,7 +4601,6 @@ dependencies = [ name = "zenoh-examples" version = "0.11.0-dev" dependencies = [ - "async-std", "clap", "env_logger", "flume", @@ -4452,6 +4610,7 @@ dependencies = [ "log", "rand 0.8.5", "rustc_version 0.4.0", + "tokio", "zenoh", "zenoh-ext", ] @@ -4460,7 +4619,6 @@ dependencies = [ name = "zenoh-ext" version = "0.11.0-dev" dependencies = [ - "async-std", "bincode", "clap", "env_logger", @@ -4468,6 +4626,7 @@ dependencies = [ "futures", "log", "serde", + "tokio", "zenoh", "zenoh-core", "zenoh-macros", @@ -4496,7 +4655,6 @@ dependencies = [ name = "zenoh-link" version = "0.11.0-dev" dependencies = [ - "async-std", "async-trait", "rcgen", "zenoh-config", @@ -4517,17 +4675,23 @@ dependencies = [ name = "zenoh-link-commons" version = "0.11.0-dev" dependencies = [ - "async-std", "async-trait", "flume", + "futures", "log", + "lz4_flex", + "rustls 0.22.2", + "rustls-webpki 0.102.2", 
"serde", + "tokio", + "tokio-util", + "typenum", "zenoh-buffers", "zenoh-codec", "zenoh-core", "zenoh-protocol", "zenoh-result", - "zenoh-sync", + "zenoh-runtime", "zenoh-util", ] @@ -4535,22 +4699,25 @@ dependencies = [ name = "zenoh-link-quic" version = "0.11.0-dev" dependencies = [ - "async-rustls", - "async-std", "async-trait", "base64 0.21.4", "futures", "log", "quinn", - "rustls", + "rustls 0.21.7", "rustls-native-certs 0.7.0", "rustls-pemfile 2.0.0", + "rustls-webpki 0.102.2", "secrecy", + "tokio", + "tokio-rustls 0.24.1", + "tokio-util", "zenoh-config", "zenoh-core", "zenoh-link-commons", "zenoh-protocol", "zenoh-result", + "zenoh-runtime", "zenoh-sync", "zenoh-util", ] @@ -4559,11 +4726,11 @@ dependencies = [ name = "zenoh-link-serial" version = "0.11.0-dev" dependencies = [ - "async-std", "async-trait", "futures", "log", "tokio", + "tokio-util", "uuid", "z-serial", "zenoh-collections", @@ -4571,6 +4738,7 @@ dependencies = [ "zenoh-link-commons", "zenoh-protocol", "zenoh-result", + "zenoh-runtime", "zenoh-sync", "zenoh-util", ] @@ -4579,13 +4747,15 @@ dependencies = [ name = "zenoh-link-tcp" version = "0.11.0-dev" dependencies = [ - "async-std", "async-trait", "log", + "tokio", + "tokio-util", "zenoh-core", "zenoh-link-commons", "zenoh-protocol", "zenoh-result", + "zenoh-runtime", "zenoh-sync", "zenoh-util", ] @@ -4594,22 +4764,25 @@ dependencies = [ name = "zenoh-link-tls" version = "0.11.0-dev" dependencies = [ - "async-rustls", - "async-std", "async-trait", "base64 0.21.4", "futures", "log", - "rustls", + "rustls 0.22.2", "rustls-pemfile 2.0.0", - "rustls-webpki 0.102.0", + "rustls-pki-types", + "rustls-webpki 0.102.2", "secrecy", + "tokio", + "tokio-rustls 0.25.0", + "tokio-util", "webpki-roots", "zenoh-config", "zenoh-core", "zenoh-link-commons", "zenoh-protocol", "zenoh-result", + "zenoh-runtime", "zenoh-sync", "zenoh-util", ] @@ -4618,16 +4791,18 @@ dependencies = [ name = "zenoh-link-udp" version = "0.11.0-dev" dependencies = [ - "async-std", 
"async-trait", "log", - "socket2 0.5.4", + "socket2 0.5.6", + "tokio", + "tokio-util", "zenoh-buffers", "zenoh-collections", "zenoh-core", "zenoh-link-commons", "zenoh-protocol", "zenoh-result", + "zenoh-runtime", "zenoh-sync", "zenoh-util", ] @@ -4637,13 +4812,13 @@ name = "zenoh-link-unixpipe" version = "0.11.0-dev" dependencies = [ "advisory-lock", - "async-io", - "async-std", "async-trait", "filepath", "log", "nix 0.27.1", "rand 0.8.5", + "tokio", + "tokio-util", "unix-named-pipe", "zenoh-buffers", "zenoh-config", @@ -4651,22 +4826,25 @@ dependencies = [ "zenoh-link-commons", "zenoh-protocol", "zenoh-result", + "zenoh-runtime", ] [[package]] name = "zenoh-link-unixsock_stream" version = "0.11.0-dev" dependencies = [ - "async-std", "async-trait", "futures", "log", "nix 0.27.1", + "tokio", + "tokio-util", "uuid", "zenoh-core", "zenoh-link-commons", "zenoh-protocol", "zenoh-result", + "zenoh-runtime", "zenoh-sync", ] @@ -4674,17 +4852,18 @@ dependencies = [ name = "zenoh-link-ws" version = "0.11.0-dev" dependencies = [ - "async-std", "async-trait", "futures-util", "log", "tokio", "tokio-tungstenite", + "tokio-util", "url", "zenoh-core", "zenoh-link-commons", "zenoh-protocol", "zenoh-result", + "zenoh-runtime", "zenoh-sync", "zenoh-util", ] @@ -4813,6 +4992,16 @@ dependencies = [ "anyhow", ] +[[package]] +name = "zenoh-runtime" +version = "0.11.0-dev" +dependencies = [ + "lazy_static", + "tokio", + "zenoh-collections", + "zenoh-result", +] + [[package]] name = "zenoh-shm" version = "0.11.0-dev" @@ -4828,25 +5017,25 @@ dependencies = [ name = "zenoh-sync" version = "0.11.0-dev" dependencies = [ - "async-std", "event-listener 4.0.0", "futures", "tokio", "zenoh-buffers", "zenoh-collections", "zenoh-core", + "zenoh-result", + "zenoh-runtime", ] [[package]] name = "zenoh-transport" version = "0.11.0-dev" dependencies = [ - "async-executor", - "async-global-executor", - "async-std", "async-trait", "env_logger", "flume", + "futures", + "futures-util", "log", "lz4_flex", 
"paste", @@ -4855,6 +5044,8 @@ dependencies = [ "rsa", "serde", "sha3", + "tokio", + "tokio-util", "zenoh-buffers", "zenoh-codec", "zenoh-collections", @@ -4864,6 +5055,7 @@ dependencies = [ "zenoh-link", "zenoh-protocol", "zenoh-result", + "zenoh-runtime", "zenoh-shm", "zenoh-sync", "zenoh-util", @@ -4884,6 +5076,7 @@ dependencies = [ "log", "pnet_datalink", "shellexpand", + "tokio", "winapi", "zenoh-core", "zenoh-result", @@ -4909,7 +5102,6 @@ dependencies = [ name = "zenohd" version = "0.11.0-dev" dependencies = [ - "async-std", "clap", "env_logger", "futures", @@ -4919,6 +5111,7 @@ dependencies = [ "log", "rand 0.8.5", "rustc_version 0.4.0", + "tokio", "zenoh", ] diff --git a/Cargo.toml b/Cargo.toml index d82d8ae7a5..d9be6e3685 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,6 +27,7 @@ members = [ "commons/zenoh-shm", "commons/zenoh-sync", "commons/zenoh-util", + "commons/zenoh-runtime", "examples", "io/zenoh-link", "io/zenoh-link-commons", @@ -77,7 +78,6 @@ anyhow = { version = "1.0.69", default-features = false } # Default features are async-executor = "1.5.0" async-global-executor = "2.3.1" async-io = "1.13.0" -async-rustls = "0.4.0" async-std = { version = "=1.12.0", default-features = false } # Default features are disabled due to some crates' requirements async-trait = "0.1.60" base64 = "0.21.4" @@ -88,7 +88,7 @@ crc = "3.0.1" criterion = "0.5" derive_more = "0.99.17" derive-new = "0.6.0" -env_logger = "0.10.0" +env_logger = "0.11.0" event-listener = "4.0.0" flume = "0.11" form_urlencoded = "1.1.0" @@ -127,10 +127,11 @@ regex = "1.7.1" ringbuffer-spsc = "0.1.9" rsa = "0.9" rustc_version = "0.4.0" -rustls = { version = "0.21.5", features = ["dangerous_configuration"] } +rustls = "0.22.2" rustls-native-certs = "0.7.0" rustls-pemfile = "2.0.0" rustls-webpki = "0.102.0" +rustls-pki-types = "1.1.0" schemars = "0.8.12" secrecy = { version = "0.8.0", features = ["serde", "alloc"] } serde = { version = "1.0.154", default-features = false, features = [ @@ -146,8 
+147,12 @@ stop-token = "0.7.0" syn = "2.0" tide = "0.16.0" token-cell = { version = "1.4.2", default-features = false } -tokio = { version = "1.26.0", default-features = false } # Default features are disabled due to some crates' requirements -tokio-tungstenite = "0.20" +tokio = { version = "1.35.1", default-features = false } # Default features are disabled due to some crates' requirements +tokio-util = "0.7.10" +tokio-tungstenite = "0.21" +tokio-rustls = "0.25.0" +console-subscriber = "0.2" +typenum = "1.16.0" uhlc = { version = "0.6.0", default-features = false } # Default features are disabled due to usage in no_std crates unzip-n = "0.1.2" url = "2.3.1" @@ -188,6 +193,7 @@ zenoh-link-serial = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link- zenoh-link = { version = "0.11.0-dev", path = "io/zenoh-link" } zenoh-link-commons = { version = "0.11.0-dev", path = "io/zenoh-link-commons" } zenoh = { version = "0.11.0-dev", path = "zenoh", default-features = false } +zenoh-runtime = { version = "0.11.0-dev", path = "commons/zenoh-runtime" } [profile.dev] debug = true diff --git a/commons/zenoh-core/Cargo.toml b/commons/zenoh-core/Cargo.toml index fcc59c7909..7890646d1b 100644 --- a/commons/zenoh-core/Cargo.toml +++ b/commons/zenoh-core/Cargo.toml @@ -18,9 +18,9 @@ version = { workspace = true } repository = { workspace = true } homepage = { workspace = true } authors = [ - "kydos ", - "Luca Cominardi ", - "Pierre Avital ", + "kydos ", + "Luca Cominardi ", + "Pierre Avital ", ] edition = { workspace = true } license = { workspace = true } @@ -32,6 +32,8 @@ std = [] default = ["std"] [dependencies] -async-std = { workspace = true, features = ["default"] } +tokio = { workspace = true, features = ["rt"] } +async-global-executor = { workspace = true, features = ["tokio"] } lazy_static = { workspace = true } zenoh-result = { workspace = true } +zenoh-runtime = { workspace = true } diff --git a/commons/zenoh-core/src/lib.rs b/commons/zenoh-core/src/lib.rs index 
f3ba5fd499..e15ff1d3bf 100644 --- a/commons/zenoh-core/src/lib.rs +++ b/commons/zenoh-core/src/lib.rs @@ -163,7 +163,7 @@ where F: Future + Send, { fn res_sync(self) -> ::To { - async_std::task::block_on(self.0) + zenoh_runtime::ZRuntime::Application.block_in_place(self.0) } } diff --git a/commons/zenoh-core/src/macros.rs b/commons/zenoh-core/src/macros.rs index b0cbb24963..20b84f213f 100644 --- a/commons/zenoh-core/src/macros.rs +++ b/commons/zenoh-core/src/macros.rs @@ -56,7 +56,7 @@ macro_rules! zasynclock { #[macro_export] macro_rules! zasyncread { ($var:expr) => { - if let Some(g) = $var.try_read() { + if let Ok(g) = $var.try_read() { g } else { $var.read().await @@ -64,27 +64,13 @@ macro_rules! zasyncread { }; } -// This macro performs an async read with upgrade to write option on RwLock -// For performance reasons, it first performs a try_upgradable_read() and, -// if it fails, it falls back on upgradable_read().await -#[macro_export] -macro_rules! zasyncread_upgradable { - ($var:expr) => { - if let Some(g) = $var.try_upgradable_read() { - g - } else { - $var.upgradable_read().await - } - }; -} - // This macro performs an async write on RwLock // For performance reasons, it first performs a try_write() and, // if it fails, it falls back on write().await #[macro_export] macro_rules! zasyncwrite { ($var:expr) => { - if let Some(g) = $var.try_write() { + if let Ok(g) = $var.try_write() { g } else { $var.write().await @@ -223,3 +209,11 @@ macro_rules! zcondfeat { } }}; } + +// This macro allows to timeout a feature +#[macro_export] +macro_rules! 
ztimeout { + ($f:expr) => { + tokio::time::timeout(TIMEOUT, $f).await.unwrap() + }; +} diff --git a/commons/zenoh-macros/src/lib.rs b/commons/zenoh-macros/src/lib.rs index 5c208fe90f..b77dffeba0 100644 --- a/commons/zenoh-macros/src/lib.rs +++ b/commons/zenoh-macros/src/lib.rs @@ -304,9 +304,9 @@ impl syn::parse::Parse for FormatUsage { /// Write a set of values into a `Formatter`, stopping as soon as a value doesn't fit the specification for its field. /// Contrary to `keformat` doesn't build the Formatter into a Key Expression. /// -/// `kewrite!($formatter, $($ident [= $expr]),*)` will attempt to write `$expr` into their respective `$ident` fields for `$formatter`. -/// `$formatter` must be an expression that dereferences to `&mut Formatter`. -/// `$expr` must resolve to a value that implements `core::fmt::Display`. +/// `kewrite!($formatter, $($ident [= $expr]),*)` will attempt to write `$expr` into their respective `$ident` fields for `$formatter`. +/// `$formatter` must be an expression that dereferences to `&mut Formatter`. +/// `$expr` must resolve to a value that implements `core::fmt::Display`. /// `$expr` defaults to `$ident` if omitted. /// /// This macro always results in an expression that resolves to `Result<&mut Formatter, FormatSetError>`. @@ -326,9 +326,9 @@ pub fn kewrite(tokens: TokenStream) -> TokenStream { /// Write a set of values into a `Formatter` and then builds it into an `OwnedKeyExpr`, stopping as soon as a value doesn't fit the specification for its field. /// -/// `keformat!($formatter, $($ident [= $expr]),*)` will attempt to write `$expr` into their respective `$ident` fields for `$formatter`. -/// `$formatter` must be an expression that dereferences to `&mut Formatter`. -/// `$expr` must resolve to a value that implements `core::fmt::Display`. +/// `keformat!($formatter, $($ident [= $expr]),*)` will attempt to write `$expr` into their respective `$ident` fields for `$formatter`. 
+/// `$formatter` must be an expression that dereferences to `&mut Formatter`. +/// `$expr` must resolve to a value that implements `core::fmt::Display`. /// `$expr` defaults to `$ident` if omitted. /// /// This macro always results in an expression that resolves to `ZResult`, and leaves `$formatter` in its written state. diff --git a/commons/zenoh-protocol/src/transport/mod.rs b/commons/zenoh-protocol/src/transport/mod.rs index cdf994e5dd..258b43baf6 100644 --- a/commons/zenoh-protocol/src/transport/mod.rs +++ b/commons/zenoh-protocol/src/transport/mod.rs @@ -65,6 +65,15 @@ pub struct TransportMessageLowLatency { pub body: TransportBodyLowLatency, } +impl TryFrom for TransportMessageLowLatency { + type Error = zenoh_result::Error; + fn try_from(msg: NetworkMessage) -> Result { + Ok(Self { + body: TransportBodyLowLatency::Network(msg), + }) + } +} + #[allow(clippy::large_enum_variant)] #[derive(Debug)] pub enum TransportBodyLowLatency { diff --git a/commons/zenoh-runtime/Cargo.toml b/commons/zenoh-runtime/Cargo.toml new file mode 100644 index 0000000000..b7aa15d634 --- /dev/null +++ b/commons/zenoh-runtime/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "zenoh-runtime" +rust-version = { workspace = true } +version = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +authors = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +categories = { workspace = true } +description = { workspace = true } + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +lazy_static = { workspace = true } +zenoh-result = { workspace = true, features = ["std"] } +zenoh-collections = { workspace = true, features = ["std"] } +tokio = { workspace = true, features = ["fs", "io-util", "macros", "net", "rt-multi-thread", "sync", "time"] } diff --git a/commons/zenoh-runtime/src/lib.rs b/commons/zenoh-runtime/src/lib.rs new file mode 100644 index 
0000000000..fb2186ecb4 --- /dev/null +++ b/commons/zenoh-runtime/src/lib.rs @@ -0,0 +1,202 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use lazy_static::lazy_static; +use std::{ + collections::HashMap, + env, + future::Future, + ops::Deref, + sync::{ + atomic::{AtomicUsize, Ordering}, + OnceLock, + }, +}; +use tokio::runtime::{Handle, Runtime}; +use zenoh_collections::Properties; +use zenoh_result::ZResult as Result; + +const ZENOH_RUNTIME_THREADS_ENV: &str = "ZENOH_RUNTIME_THREADS"; + +#[derive(Hash, Eq, PartialEq, Clone, Copy, Debug)] +pub enum ZRuntime { + Application, + Acceptor, + TX, + RX, + Net, +} + +impl ZRuntime { + fn iter() -> impl Iterator { + use ZRuntime::*; + [Application, Acceptor, TX, RX, Net].into_iter() + } + + fn init(&self) -> Result { + // dbg!(*ZRUNTIME_CONFIG); + let config = &ZRUNTIME_CONFIG; + + let thread_name = format!("{self:?}"); + + use ZRuntime::*; + let rt = match self { + Application => tokio::runtime::Builder::new_multi_thread() + .worker_threads(config.application_threads) + .enable_io() + .enable_time() + .thread_name_fn(move || { + static ATOMIC_THREAD_ID: AtomicUsize = AtomicUsize::new(0); + let id = ATOMIC_THREAD_ID.fetch_add(1, Ordering::SeqCst); + format!("{thread_name}-{}", id) + }) + .build()?, + Acceptor => tokio::runtime::Builder::new_multi_thread() + .worker_threads(config.acceptor_threads) + .enable_io() + .enable_time() + .thread_name_fn(move || { + static ATOMIC_THREAD_ID: AtomicUsize = AtomicUsize::new(0); + let id = ATOMIC_THREAD_ID.fetch_add(1, Ordering::SeqCst); + format!("{thread_name}-{}", id) + }) + 
.build()?, + TX => tokio::runtime::Builder::new_multi_thread() + .worker_threads(config.tx_threads) + .enable_io() + .enable_time() + .thread_name_fn(move || { + static ATOMIC_THREAD_ID: AtomicUsize = AtomicUsize::new(0); + let id = ATOMIC_THREAD_ID.fetch_add(1, Ordering::SeqCst); + format!("{thread_name}-{}", id) + }) + .build()?, + RX => tokio::runtime::Builder::new_multi_thread() + .worker_threads(config.rx_threads) + .enable_io() + .enable_time() + .thread_name_fn(move || { + static ATOMIC_THREAD_ID: AtomicUsize = AtomicUsize::new(0); + let id = ATOMIC_THREAD_ID.fetch_add(1, Ordering::SeqCst); + format!("{thread_name}-{}", id) + }) + .build()?, + Net => tokio::runtime::Builder::new_multi_thread() + .worker_threads(config.net_threads) + .enable_io() + .enable_time() + .thread_name_fn(move || { + static ATOMIC_THREAD_ID: AtomicUsize = AtomicUsize::new(0); + let id = ATOMIC_THREAD_ID.fetch_add(1, Ordering::SeqCst); + format!("{thread_name}-{}", id) + }) + .build()?, + }; + + Ok(rt) + } + + pub fn block_in_place(&self, f: F) -> R + where + F: Future, + { + tokio::task::block_in_place(move || self.block_on(f)) + } +} + +impl Deref for ZRuntime { + type Target = Handle; + fn deref(&self) -> &Self::Target { + ZRUNTIME_POOL.get(self) + } +} + +lazy_static! 
{ + pub static ref ZRUNTIME_CONFIG: ZRuntimeConfig = ZRuntimeConfig::from_env(); + pub static ref ZRUNTIME_POOL: ZRuntimePool = ZRuntimePool::new(); +} + +pub struct ZRuntimePool(HashMap>); + +impl ZRuntimePool { + fn new() -> Self { + Self(ZRuntime::iter().map(|zrt| (zrt, OnceLock::new())).collect()) + } + + pub fn get(&self, zrt: &ZRuntime) -> &Handle { + self.0 + .get(zrt) + .expect("The hashmap should contains {zrt} after initialization") + .get_or_init(|| zrt.init().expect("Failed to init {zrt}")) + .handle() + } +} + +#[derive(Debug, Copy, Clone)] +pub struct ZRuntimeConfig { + pub application_threads: usize, + pub acceptor_threads: usize, + pub tx_threads: usize, + pub rx_threads: usize, + pub net_threads: usize, +} + +impl ZRuntimeConfig { + fn from_env() -> ZRuntimeConfig { + let mut c = Self::default(); + + if let Ok(s) = env::var(ZENOH_RUNTIME_THREADS_ENV) { + let ps = Properties::from(s); + if let Some(n) = ps.get("tx") { + if let Ok(n) = n.parse::() { + c.tx_threads = n; + } + } + if let Some(n) = ps.get("rx") { + if let Ok(n) = n.parse::() { + c.rx_threads = n; + } + } + if let Some(n) = ps.get("net") { + if let Ok(n) = n.parse::() { + c.net_threads = n; + } + } + if let Some(n) = ps.get("acceptor") { + if let Ok(n) = n.parse::() { + c.acceptor_threads = n; + } + } + if let Some(n) = ps.get("application") { + if let Ok(n) = n.parse::() { + c.application_threads = n; + } + } + } + + c + } +} + +// WARN: at least two otherwise fail on the routing test +impl Default for ZRuntimeConfig { + fn default() -> Self { + Self { + application_threads: 2, + acceptor_threads: 2, + tx_threads: 2, + rx_threads: 2, + net_threads: 2, + } + } +} diff --git a/commons/zenoh-sync/Cargo.toml b/commons/zenoh-sync/Cargo.toml index b660dd371a..01e8e935fd 100644 --- a/commons/zenoh-sync/Cargo.toml +++ b/commons/zenoh-sync/Cargo.toml @@ -18,9 +18,9 @@ version = { workspace = true } repository = { workspace = true } homepage = { workspace = true } authors = [ - "kydos ", - "Luca 
Cominardi ", - "Pierre Avital ", + "kydos ", + "Luca Cominardi ", + "Pierre Avital ", ] edition = { workspace = true } license = { workspace = true } @@ -29,13 +29,14 @@ description = "Internal crate for zenoh." # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -async-std = { workspace = true, features = ["default", "unstable"] } event-listener = { workspace = true } futures = { workspace = true } -tokio = { workspace = true, features = ["default", "sync"] } +tokio = { workspace = true, features = ["sync"] } zenoh-buffers = { workspace = true } zenoh-collections = { workspace = true, features = ["default"] } zenoh-core = { workspace = true } +zenoh-runtime = { workspace = true } [dev-dependencies] -async-std = { workspace = true, features = ["default", "unstable", "attributes"] } +tokio = { workspace = true, features = ["macros", "sync", "rt-multi-thread", "time"] } +zenoh-result = { workspace = true } diff --git a/commons/zenoh-sync/src/condition.rs b/commons/zenoh-sync/src/condition.rs index bae030abbb..098aa05411 100644 --- a/commons/zenoh-sync/src/condition.rs +++ b/commons/zenoh-sync/src/condition.rs @@ -11,9 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::sync::MutexGuard as AysncMutexGuard; use event_listener::{Event, EventListener}; use std::{pin::Pin, sync::MutexGuard}; +use tokio::sync::MutexGuard as AysncMutexGuard; pub type ConditionWaiter = Pin>; /// This is a Condition Variable similar to that provided by POSIX. 
diff --git a/commons/zenoh-sync/src/fifo_queue.rs b/commons/zenoh-sync/src/fifo_queue.rs index 7460aabb4a..e0ce57cb36 100644 --- a/commons/zenoh-sync/src/fifo_queue.rs +++ b/commons/zenoh-sync/src/fifo_queue.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use crate::Condition; -use async_std::sync::Mutex; +use tokio::sync::Mutex; use zenoh_collections::RingBuffer; use zenoh_core::zasynclock; @@ -32,7 +32,7 @@ impl FifoQueue { } pub fn try_push(&self, x: T) -> Option { - if let Some(mut guard) = self.buffer.try_lock() { + if let Ok(mut guard) = self.buffer.try_lock() { let res = guard.push(x); if res.is_none() { drop(guard); @@ -57,7 +57,7 @@ impl FifoQueue { } pub fn try_pull(&self) -> Option { - if let Some(mut guard) = self.buffer.try_lock() { + if let Ok(mut guard) = self.buffer.try_lock() { if let Some(e) = guard.pull() { drop(guard); self.not_full.notify_one(); diff --git a/commons/zenoh-sync/src/lifo_queue.rs b/commons/zenoh-sync/src/lifo_queue.rs index e4cf00ca65..f29614d4b2 100644 --- a/commons/zenoh-sync/src/lifo_queue.rs +++ b/commons/zenoh-sync/src/lifo_queue.rs @@ -11,28 +11,27 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::Condition; -use async_std::sync::Mutex; +use std::sync::{Condvar, Mutex}; use zenoh_collections::StackBuffer; -use zenoh_core::zasynclock; +use zenoh_core::zlock; pub struct LifoQueue { - not_empty: Condition, - not_full: Condition, + not_empty: Condvar, + not_full: Condvar, buffer: Mutex>, } impl LifoQueue { pub fn new(capacity: usize) -> LifoQueue { LifoQueue { - not_empty: Condition::new(), - not_full: Condition::new(), + not_empty: Condvar::new(), + not_full: Condvar::new(), buffer: Mutex::new(StackBuffer::new(capacity)), } } pub fn try_push(&self, x: T) -> Option { - if let Some(mut guard) = self.buffer.try_lock() { + if let Ok(mut guard) = self.buffer.try_lock() { let res = guard.push(x); if res.is_none() { drop(guard); @@ -43,21 +42,21 @@ impl LifoQueue { Some(x) } - pub async fn push(&self, x: T) { + pub fn 
push(&self, x: T) { + let mut guard = zlock!(self.buffer); loop { - let mut guard = zasynclock!(self.buffer); if !guard.is_full() { guard.push(x); drop(guard); self.not_empty.notify_one(); return; } - self.not_full.wait(guard).await; + guard = self.not_full.wait(guard).unwrap(); } } pub fn try_pull(&self) -> Option { - if let Some(mut guard) = self.buffer.try_lock() { + if let Ok(mut guard) = self.buffer.try_lock() { if let Some(e) = guard.pop() { drop(guard); self.not_full.notify_one(); @@ -67,15 +66,15 @@ impl LifoQueue { None } - pub async fn pull(&self) -> T { + pub fn pull(&self) -> T { + let mut guard = zlock!(self.buffer); loop { - let mut guard = zasynclock!(self.buffer); if let Some(e) = guard.pop() { drop(guard); self.not_full.notify_one(); return e; } - self.not_empty.wait(guard).await; + guard = self.not_empty.wait(guard).unwrap(); } } } diff --git a/commons/zenoh-sync/src/mvar.rs b/commons/zenoh-sync/src/mvar.rs index fa0aebdff8..1b4a90e1e2 100644 --- a/commons/zenoh-sync/src/mvar.rs +++ b/commons/zenoh-sync/src/mvar.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use crate::Condition; -use async_std::sync::Mutex; use std::sync::atomic::{AtomicUsize, Ordering}; +use tokio::sync::Mutex; use zenoh_core::zasynclock; pub struct Mvar { @@ -90,12 +90,13 @@ impl Default for Mvar { } } +#[cfg(test)] mod tests { - #[test] - fn mvar() { + use zenoh_result::ZResult; + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn mvar() -> ZResult<()> { use super::Mvar; - use async_std::prelude::FutureExt; - use async_std::task; use std::sync::Arc; use std::time::Duration; @@ -105,24 +106,23 @@ mod tests { let mvar: Arc> = Arc::new(Mvar::new()); let c_mvar = mvar.clone(); - let ch = task::spawn(async move { + let ch = tokio::task::spawn(async move { for _ in 0..count { let n = c_mvar.take().await; print!("-{n} "); } }); - let ph = task::spawn(async move { + let ph = tokio::task::spawn(async move { for i in 0..count { mvar.put(i).await; print!("+{i} 
"); } }); - task::block_on(async { - ph.timeout(TIMEOUT).await.unwrap(); - ch.timeout(TIMEOUT).await.unwrap(); - }); + let _ = tokio::time::timeout(TIMEOUT, ph).await?; + let _ = tokio::time::timeout(TIMEOUT, ch).await?; println!(); + Ok(()) } } diff --git a/commons/zenoh-sync/src/object_pool.rs b/commons/zenoh-sync/src/object_pool.rs index d26bc7ea7c..83b673c449 100644 --- a/commons/zenoh-sync/src/object_pool.rs +++ b/commons/zenoh-sync/src/object_pool.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // use super::LifoQueue; -use async_std::task; use std::{ any::Any, fmt, @@ -51,8 +50,8 @@ impl T> RecyclingObjectPool { .map(|obj| RecyclingObject::new(obj, Arc::downgrade(&self.inner))) } - pub async fn take(&self) -> RecyclingObject { - let obj = self.inner.pull().await; + pub fn take(&self) -> RecyclingObject { + let obj = self.inner.pull(); RecyclingObject::new(obj, Arc::downgrade(&self.inner)) } } @@ -71,10 +70,10 @@ impl RecyclingObject { } } - pub async fn recycle(mut self) { + pub fn recycle(mut self) { if let Some(pool) = self.pool.upgrade() { if let Some(obj) = self.object.take() { - pool.push(obj).await; + pool.push(obj); } } } @@ -113,7 +112,7 @@ impl Drop for RecyclingObject { fn drop(&mut self) { if let Some(pool) = self.pool.upgrade() { if let Some(obj) = self.object.take() { - task::block_on(pool.push(obj)); + pool.push(obj); } } } diff --git a/commons/zenoh-sync/src/signal.rs b/commons/zenoh-sync/src/signal.rs index 998435e661..74dd3e5199 100644 --- a/commons/zenoh-sync/src/signal.rs +++ b/commons/zenoh-sync/src/signal.rs @@ -69,26 +69,25 @@ impl Default for Signal { #[cfg(test)] mod tests { use super::*; - use async_std::task; use std::time::Duration; - #[async_std::test] + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn signal_test() { let signal = Signal::new(); // spawn publisher - let r#pub = task::spawn({ + let r#pub = tokio::task::spawn({ let signal = signal.clone(); async move { - 
task::sleep(Duration::from_millis(200)).await; + tokio::time::sleep(Duration::from_millis(200)).await; signal.trigger(); signal.trigger(); // second trigger should not break } }); // spawn subscriber that waits immediately - let fast_sub = task::spawn({ + let fast_sub = tokio::task::spawn({ let signal = signal.clone(); async move { @@ -97,17 +96,17 @@ mod tests { }); // spawn subscriber that waits after the publisher triggers the signal - let slow_sub = task::spawn({ + let slow_sub = tokio::task::spawn({ let signal = signal.clone(); async move { - task::sleep(Duration::from_millis(400)).await; + tokio::time::sleep(Duration::from_millis(400)).await; signal.wait().await; } }); // check that the slow subscriber does not half - let result = async_std::future::timeout( + let result = tokio::time::timeout( Duration::from_millis(50000), futures::future::join3(r#pub, fast_sub, slow_sub), ) diff --git a/commons/zenoh-util/Cargo.toml b/commons/zenoh-util/Cargo.toml index 256f53d33a..7a66600e79 100644 --- a/commons/zenoh-util/Cargo.toml +++ b/commons/zenoh-util/Cargo.toml @@ -37,6 +37,7 @@ default = ["std"] [dependencies] async-std = { workspace = true, features = ["default", "unstable"] } +tokio = { workspace = true, features = ["time", "net"] } async-trait = { workspace = true } flume = { workspace = true } home = { workspace = true } diff --git a/commons/zenoh-util/src/std_only/net/mod.rs b/commons/zenoh-util/src/std_only/net/mod.rs index 8658e24bbc..8ec0ed03a1 100644 --- a/commons/zenoh-util/src/std_only/net/mod.rs +++ b/commons/zenoh-util/src/std_only/net/mod.rs @@ -11,9 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::net::{TcpListener, TcpStream, UdpSocket}; use std::net::{IpAddr, Ipv6Addr}; -use std::time::Duration; +use tokio::net::{TcpSocket, UdpSocket}; use zenoh_core::zconfigurable; #[cfg(unix)] use zenoh_result::zerror; @@ -24,73 +23,6 @@ zconfigurable! 
{ static ref WINDOWS_GET_ADAPTERS_ADDRESSES_MAX_RETRIES: u32 = 3; } -pub fn set_linger(socket: &TcpStream, dur: Option) -> ZResult<()> { - #[cfg(unix)] - { - use std::os::unix::io::AsRawFd; - - let raw_socket = socket.as_raw_fd(); - let linger = match dur { - Some(d) => libc::linger { - l_onoff: 1, - l_linger: d.as_secs() as libc::c_int, - }, - None => libc::linger { - l_onoff: 0, - l_linger: 0, - }, - }; - - // Set the SO_LINGER option - unsafe { - let ret = libc::setsockopt( - raw_socket, - libc::SOL_SOCKET, - libc::SO_LINGER, - &linger as *const libc::linger as *const libc::c_void, - std::mem::size_of_val(&linger) as libc::socklen_t, - ); - match ret { - 0 => Ok(()), - err_code => bail!("setsockopt returned {}", err_code), - } - } - } - - #[cfg(windows)] - { - use std::os::windows::io::AsRawSocket; - use winapi::um::winsock2; - use winapi::um::ws2tcpip; - - let raw_socket = socket.as_raw_socket(); - let linger = match dur { - Some(d) => winsock2::linger { - l_onoff: 1, - l_linger: d.as_secs() as u16, - }, - None => winsock2::linger { - l_onoff: 0, - l_linger: 0, - }, - }; - - unsafe { - let ret = winsock2::setsockopt( - raw_socket.try_into().unwrap(), - winsock2::SOL_SOCKET, - winsock2::SO_LINGER, - &linger as *const winsock2::linger as *const i8, - std::mem::size_of_val(&linger) as ws2tcpip::socklen_t, - ); - match ret { - 0 => Ok(()), - err_code => bail!("setsockopt returned {}", err_code), - } - } - } -} - #[cfg(windows)] unsafe fn get_adapters_adresses(af_spec: i32) -> ZResult> { use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; @@ -123,7 +55,6 @@ unsafe fn get_adapters_adresses(af_spec: i32) -> ZResult> { Ok(buffer) } - pub fn get_interface(name: &str) -> ZResult> { #[cfg(unix)] { @@ -493,51 +424,29 @@ pub fn get_ipv6_ipaddrs(interface: Option<&str>) -> Vec { } #[cfg(target_os = "linux")] -fn set_bind_to_device(socket: std::os::raw::c_int, iface: Option<&str>) { +pub fn set_bind_to_device_tcp_socket(socket: &TcpSocket, iface: Option<&str>) -> ZResult<()> { 
if let Some(iface) = iface { - // @TODO: switch to bind_device after tokio porting - log::debug!("Listen at the interface: {}", iface); - unsafe { - libc::setsockopt( - socket, - libc::SOL_SOCKET, - libc::SO_BINDTODEVICE, - iface.as_ptr() as *const std::os::raw::c_void, - iface.len() as libc::socklen_t, - ); - } + socket.bind_device(Some(iface.as_bytes()))?; } + Ok(()) } #[cfg(target_os = "linux")] -pub fn set_bind_to_device_tcp_listener(socket: &TcpListener, iface: Option<&str>) { - use std::os::fd::AsRawFd; - set_bind_to_device(socket.as_raw_fd(), iface); -} - -#[cfg(target_os = "linux")] -pub fn set_bind_to_device_tcp_stream(socket: &TcpStream, iface: Option<&str>) { - use std::os::fd::AsRawFd; - set_bind_to_device(socket.as_raw_fd(), iface); -} - -#[cfg(target_os = "linux")] -pub fn set_bind_to_device_udp_socket(socket: &UdpSocket, iface: Option<&str>) { - use std::os::fd::AsRawFd; - set_bind_to_device(socket.as_raw_fd(), iface); -} - -#[cfg(any(target_os = "macos", target_os = "windows"))] -pub fn set_bind_to_device_tcp_listener(_socket: &TcpListener, _iface: Option<&str>) { - log::warn!("Listen at the interface is not supported for this platform"); +pub fn set_bind_to_device_udp_socket(socket: &UdpSocket, iface: Option<&str>) -> ZResult<()> { + if let Some(iface) = iface { + socket.bind_device(Some(iface.as_bytes()))?; + } + Ok(()) } #[cfg(any(target_os = "macos", target_os = "windows"))] -pub fn set_bind_to_device_tcp_stream(_socket: &TcpStream, _iface: Option<&str>) { - log::warn!("Listen at the interface is not supported for this platform"); +pub fn set_bind_to_device_tcp_socket(socket: &TcpSocket, iface: Option<&str>) -> ZResult<()> { + log::warn!("Binding the socket {socket:?} to the interface {iface:?} is not supported on macOS and Windows"); + Ok(()) } #[cfg(any(target_os = "macos", target_os = "windows"))] -pub fn set_bind_to_device_udp_socket(_socket: &UdpSocket, _iface: Option<&str>) { - log::warn!("Listen at the interface is not supported for this 
platform"); +pub fn set_bind_to_device_udp_socket(socket: &UdpSocket, iface: Option<&str>) -> ZResult<()> { + log::warn!("Binding the socket {socket:?} to the interface {iface:?} is not supported on macOS and Windows"); + Ok(()) } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 190894fb18..4833be3963 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -36,12 +36,12 @@ transport_unixpipe = ["zenoh/transport_unixpipe"] # # [target.'cfg(unix)'.dependencies] # zenoh = { workspace = true, features = ["transport_unixpipe"] } -# +# # [target.'cfg(not(unix))'.dependencies] # zenoh = { workspace = true } [dependencies] -async-std = { workspace = true, features = ["attributes"] } +tokio = { workspace = true, features = ["rt-multi-thread", "time", "io-std"] } clap = { workspace = true, features = ["derive"] } env_logger = { workspace = true } flume = { workspace = true } diff --git a/examples/README.md b/examples/README.md index 8e5b3085ba..92bf388aa5 100644 --- a/examples/README.md +++ b/examples/README.md @@ -36,7 +36,7 @@ ### z_put - Puts a path/value into Zenoh. + Puts a path/value into Zenoh. The path/value will be received by all matching subscribers, for instance the [z_sub](#z_sub) and [z_storage](#z_storage) examples. @@ -65,7 +65,7 @@ ### z_sub - Declares a key expression and a subscriber. + Declares a key expression and a subscriber. The subscriber will be notified of each `put` or `delete` made on any key expression matching the subscriber key expression, and will print this notification. Typical usage: @@ -74,12 +74,12 @@ ``` or ```bash - z_sub -k demo/** + z_sub -k 'demo/**' ``` ### z_pull - Declares a key expression and a pull subscriber. + Declares a key expression and a pull subscriber. On each pull, the pull subscriber will be notified of the last `put` or `delete` made on each key expression matching the subscriber key expression, and will print this notification. 
@@ -89,12 +89,12 @@ ``` or ```bash - z_pull -k demo/** + z_pull -k 'demo/**' ``` ### z_get - Sends a query message for a selector. + Sends a query message for a selector. The queryables with a matching path or selector (for instance [z_queryable](#z_queryable) and [z_storage](#z_storage)) will receive this query and reply with paths/values that will be received by the receiver stream. @@ -104,12 +104,12 @@ ``` or ```bash - z_get -s demo/** + z_get -s 'demo/**' ``` ### z_queryable - Declares a queryable function with a path. + Declares a queryable function with a path. This queryable function will be triggered by each call to get with a selector that matches the path, and will return a value to the querier. @@ -124,7 +124,7 @@ ### z_storage - Trivial implementation of a storage in memory. + Trivial implementation of a storage in memory. This example declares a subscriber and a queryable on the same selector. The subscriber callback will store the received paths/values in a hashmap. The queryable callback will answer to queries with the paths/values stored in the hashmap @@ -136,7 +136,7 @@ ``` or ```bash - z_storage -k demo/** + z_storage -k 'demo/**' ``` ### z_pub_shm & z_sub @@ -173,8 +173,8 @@ ### z_ping & z_pong Pub/Sub roundtrip time test. - This example allows performing roundtrip time measurements. The z_ping example - performs a put operation on a first key expression, waits for a reply from the pong + This example allows performing roundtrip time measurements. The z_ping example + performs a put operation on a first key expression, waits for a reply from the pong example on a second key expression and measures the time between the two. The pong application waits for samples on the first key expression and replies by writing back the received data on the second key expression. 
@@ -220,7 +220,7 @@ ``` or ```bash - z_liveliness -k group1/member1 + z_liveliness -k 'group1/member1' ``` ### z_get_liveliness @@ -234,7 +234,7 @@ ``` or ```bash - z_get_liveliness -k group1/** + z_get_liveliness -k 'group1/**' ``` ### z_sub_liveliness @@ -252,5 +252,5 @@ ``` or ```bash - z_sub_liveliness -k group1/** + z_sub_liveliness -k 'group1/**' ``` diff --git a/examples/examples/z_delete.rs b/examples/examples/z_delete.rs index f80e199d6d..a090458c71 100644 --- a/examples/examples/z_delete.rs +++ b/examples/examples/z_delete.rs @@ -16,7 +16,7 @@ use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; -#[async_std::main] +#[tokio::main] async fn main() { // initiate logging env_logger::init(); diff --git a/examples/examples/z_forward.rs b/examples/examples/z_forward.rs index 5dd786843e..486ccc4fdb 100644 --- a/examples/examples/z_forward.rs +++ b/examples/examples/z_forward.rs @@ -17,7 +17,7 @@ use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; use zenoh_ext::SubscriberForward; -#[async_std::main] +#[tokio::main] async fn main() { // Initiate logging env_logger::init(); diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 57c36c2e62..0603b4f9fb 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -18,7 +18,7 @@ use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; -#[async_std::main] +#[tokio::main] async fn main() { // initiate logging env_logger::init(); diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index e0aaf8cd23..3538b7a05c 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -18,7 +18,7 @@ use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; -#[async_std::main] +#[tokio::main] async fn main() { // initiate logging env_logger::init(); diff --git a/examples/examples/z_info.rs 
b/examples/examples/z_info.rs index ce752b2e7e..1d047f9454 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -16,7 +16,7 @@ use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; -#[async_std::main] +#[tokio::main] async fn main() { // initiate logging env_logger::init(); diff --git a/examples/examples/z_liveliness.rs b/examples/examples/z_liveliness.rs index 06c83c1e6f..937868e091 100644 --- a/examples/examples/z_liveliness.rs +++ b/examples/examples/z_liveliness.rs @@ -16,7 +16,7 @@ use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; -#[async_std::main] +#[tokio::main] async fn main() { // Initiate logging env_logger::init(); diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 7ba17745b5..4863387df0 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -11,14 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::task::sleep; use clap::Parser; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; -#[async_std::main] +#[tokio::main] async fn main() { // Initiate logging env_logger::init(); @@ -33,7 +32,7 @@ async fn main() { println!("Press CTRL-C to quit..."); for idx in 0..u32::MAX { - sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] {value}"); println!("Putting Data ('{}': '{}')...", &key_expr, buf); let mut put = publisher.put(buf); diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index c54fb358d3..2aadcf33de 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::task::sleep; use clap::Parser; use std::time::Duration; use zenoh::config::Config; @@ -22,7 +21,7 @@ use zenoh_examples::CommonArgs; const N: usize = 10; const K: u32 = 3; 
-#[async_std::main] +#[tokio::main] async fn main() -> Result<(), zenoh::Error> { // Initiate logging env_logger::init(); @@ -46,11 +45,11 @@ async fn main() -> Result<(), zenoh::Error> { println!("Press CTRL-C to quit..."); for idx in 0..(K * N as u32) { - sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_secs(1)).await; let mut sbuf = match shm.alloc(1024) { Ok(buf) => buf, Err(_) => { - sleep(Duration::from_millis(100)).await; + tokio::time::sleep(Duration::from_millis(100)).await; println!( "Afer failing allocation the GC collected: {} bytes -- retrying", shm.garbage_collect() diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 7c6f3cbbd3..c8a33f98fa 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -18,7 +18,7 @@ use zenoh::publication::CongestionControl; use zenoh::shm::SharedMemoryManager; use zenoh_examples::CommonArgs; -#[async_std::main] +#[tokio::main] async fn main() { // initiate logging env_logger::init(); diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index db1c9d0670..bd59be7dee 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -11,14 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::task::sleep; use clap::Parser; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; -#[async_std::main] +#[tokio::main] async fn main() { // initiate logging env_logger::init(); @@ -47,7 +46,7 @@ async fn main() { println!("Press CTRL-C to quit..."); for idx in 0..u32::MAX { - sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_secs(1)).await; println!("[{idx:4}] Pulling..."); subscriber.pull().res().await.unwrap(); } diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index a38f0c7f01..7b38490507 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -16,7 +16,7 @@ use 
zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; -#[async_std::main] +#[tokio::main] async fn main() { // initiate logging env_logger::init(); diff --git a/examples/examples/z_put_float.rs b/examples/examples/z_put_float.rs index cc667df02c..33482e4680 100644 --- a/examples/examples/z_put_float.rs +++ b/examples/examples/z_put_float.rs @@ -16,7 +16,7 @@ use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; -#[async_std::main] +#[tokio::main] async fn main() { // initiate logging env_logger::init(); diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 20c946bc0b..2feac12a8e 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -16,7 +16,7 @@ use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; -#[async_std::main] +#[tokio::main] async fn main() { // initiate logging env_logger::init(); diff --git a/examples/examples/z_scout.rs b/examples/examples/z_scout.rs index 10581dbaa1..bc778cfc0f 100644 --- a/examples/examples/z_scout.rs +++ b/examples/examples/z_scout.rs @@ -11,12 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::prelude::FutureExt; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::scouting::WhatAmI; -#[async_std::main] +#[tokio::main] async fn main() { // initiate logging env_logger::init(); @@ -27,12 +26,11 @@ async fn main() { .await .unwrap(); - let _ = async { + let _ = tokio::time::timeout(std::time::Duration::from_secs(1), async { while let Ok(hello) = receiver.recv_async().await { println!("{hello}"); } - } - .timeout(std::time::Duration::from_secs(1)) + }) .await; // stop scouting diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 88849f5b0d..161db6819f 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -20,7 +20,7 @@ use zenoh::config::Config; use 
zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; -#[async_std::main] +#[tokio::main] async fn main() { // initiate logging env_logger::init(); diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index 3205f3270b..d2d86bea8b 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -16,7 +16,7 @@ use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; -#[async_std::main] +#[tokio::main] async fn main() { // Initiate logging env_logger::init(); diff --git a/examples/examples/z_sub_liveliness.rs b/examples/examples/z_sub_liveliness.rs index 596deaef00..0d0f9fc5ac 100644 --- a/examples/examples/z_sub_liveliness.rs +++ b/examples/examples/z_sub_liveliness.rs @@ -16,7 +16,7 @@ use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; -#[async_std::main] +#[tokio::main] async fn main() { // Initiate logging env_logger::init(); diff --git a/io/zenoh-link-commons/Cargo.toml b/io/zenoh-link-commons/Cargo.toml index 29bb0eabfc..ea21228a4b 100644 --- a/io/zenoh-link-commons/Cargo.toml +++ b/io/zenoh-link-commons/Cargo.toml @@ -28,9 +28,9 @@ description = "Internal crate for zenoh." 
compression = [] [dependencies] -async-std = { workspace = true } -zenoh-sync = { workspace = true } async-trait = { workspace = true } +rustls = { workspace = true } +rustls-webpki = { workspace = true } flume = { workspace = true } log = { workspace = true } serde = { workspace = true, features = ["default"] } @@ -40,3 +40,7 @@ zenoh-core = { workspace = true } zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } zenoh-util = { workspace = true } +zenoh-runtime = { workspace = true } +tokio = { workspace = true, features = ["io-util", "net", "fs", "sync", "time"] } +tokio-util = { workspace = true, features = ["rt"] } +futures = { workspace = true } diff --git a/io/zenoh-link-commons/src/lib.rs b/io/zenoh-link-commons/src/lib.rs index 0a43aac3d9..f9ad7166ee 100644 --- a/io/zenoh-link-commons/src/lib.rs +++ b/io/zenoh-link-commons/src/lib.rs @@ -21,6 +21,7 @@ extern crate alloc; mod listener; mod multicast; +pub mod tls; mod unicast; use alloc::{borrow::ToOwned, boxed::Box, string::String, vec, vec::Vec}; @@ -56,9 +57,8 @@ pub trait LocatorInspector: Default { async fn is_multicast(&self, locator: &Locator) -> ZResult; } -#[async_trait] pub trait ConfigurationInspector: Default { - async fn inspect_config(&self, configuration: &C) -> ZResult; + fn inspect_config(&self, configuration: &C) -> ZResult; } impl fmt::Display for Link { diff --git a/io/zenoh-link-commons/src/listener.rs b/io/zenoh-link-commons/src/listener.rs index 7cf294de8a..be61e9cf89 100644 --- a/io/zenoh-link-commons/src/listener.rs +++ b/io/zenoh-link-commons/src/listener.rs @@ -11,73 +11,79 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::net::SocketAddr; -use async_std::task; -use async_std::task::JoinHandle; +use futures::Future; use std::collections::HashMap; use std::net::IpAddr; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::net::SocketAddr; use std::sync::{Arc, RwLock}; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use 
zenoh_core::{zread, zwrite}; use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{zerror, ZResult}; -use zenoh_sync::Signal; use crate::BIND_INTERFACE; pub struct ListenerUnicastIP { endpoint: EndPoint, - active: Arc, - signal: Signal, + token: CancellationToken, handle: JoinHandle>, } impl ListenerUnicastIP { fn new( endpoint: EndPoint, - active: Arc, - signal: Signal, + token: CancellationToken, handle: JoinHandle>, ) -> ListenerUnicastIP { ListenerUnicastIP { endpoint, - active, - signal, + token, handle, } } + + async fn stop(&self) { + self.token.cancel(); + } } pub struct ListenersUnicastIP { + // TODO(yuyuan): should we change this to AsyncRwLock? listeners: Arc>>, + pub token: CancellationToken, } impl ListenersUnicastIP { pub fn new() -> ListenersUnicastIP { ListenersUnicastIP { listeners: Arc::new(RwLock::new(HashMap::new())), + token: CancellationToken::new(), } } - pub async fn add_listener( + pub async fn add_listener( &self, endpoint: EndPoint, addr: SocketAddr, - active: Arc, - signal: Signal, - handle: JoinHandle>, - ) -> ZResult<()> { + future: F, + token: CancellationToken, + ) -> ZResult<()> + where + F: Future> + Send + 'static, + { let mut listeners = zwrite!(self.listeners); let c_listeners = self.listeners.clone(); let c_addr = addr; - let wraphandle = task::spawn(async move { + let task = async move { // Wait for the accept loop to terminate - let res = handle.await; + let res = future.await; zwrite!(c_listeners).remove(&c_addr); res - }); + }; + let handle = zenoh_runtime::ZRuntime::Acceptor.spawn(task); - let listener = ListenerUnicastIP::new(endpoint, active, signal, wraphandle); + let listener = ListenerUnicastIP::new(endpoint, token, handle); // Update the list of active listeners on the manager listeners.insert(addr, listener); Ok(()) @@ -93,9 +99,8 @@ impl ListenersUnicastIP { })?; // Send the stop signal - listener.active.store(false, Ordering::Release); - listener.signal.trigger(); - listener.handle.await + 
listener.stop().await; + listener.handle.await? } pub fn get_endpoints(&self) -> Vec { diff --git a/io/zenoh-link-commons/src/tls.rs b/io/zenoh-link-commons/src/tls.rs new file mode 100644 index 0000000000..562b02c81e --- /dev/null +++ b/io/zenoh-link-commons/src/tls.rs @@ -0,0 +1,87 @@ +use alloc::vec::Vec; +use rustls::{ + client::{ + danger::{ServerCertVerified, ServerCertVerifier}, + verify_server_cert_signed_by_trust_anchor, + }, + crypto::{verify_tls12_signature, verify_tls13_signature}, + pki_types::{CertificateDer, ServerName, UnixTime}, + server::ParsedCertificate, + RootCertStore, +}; +use webpki::ALL_VERIFICATION_ALGS; + +impl ServerCertVerifier for WebPkiVerifierAnyServerName { + /// Will verify the certificate is valid in the following ways: + /// - Signed by a trusted `RootCertStore` CA + /// - Not Expired + fn verify_server_cert( + &self, + end_entity: &CertificateDer<'_>, + intermediates: &[CertificateDer<'_>], + _server_name: &ServerName<'_>, + _ocsp_response: &[u8], + now: UnixTime, + ) -> Result { + let cert = ParsedCertificate::try_from(end_entity)?; + verify_server_cert_signed_by_trust_anchor( + &cert, + &self.roots, + intermediates, + now, + ALL_VERIFICATION_ALGS, + )?; + Ok(ServerCertVerified::assertion()) + } + + fn verify_tls12_signature( + &self, + message: &[u8], + cert: &CertificateDer<'_>, + dss: &rustls::DigitallySignedStruct, + ) -> Result { + verify_tls12_signature( + message, + cert, + dss, + &rustls::crypto::ring::default_provider().signature_verification_algorithms, + ) + } + + fn verify_tls13_signature( + &self, + message: &[u8], + cert: &CertificateDer<'_>, + dss: &rustls::DigitallySignedStruct, + ) -> Result { + verify_tls13_signature( + message, + cert, + dss, + &rustls::crypto::ring::default_provider().signature_verification_algorithms, + ) + } + + fn supported_verify_schemes(&self) -> Vec { + rustls::crypto::ring::default_provider() + .signature_verification_algorithms + .supported_schemes() + } +} + +/// 
`ServerCertVerifier` that verifies that the server is signed by a trusted root, but allows any serverName +/// see the trait impl for more information. +#[derive(Debug)] +pub struct WebPkiVerifierAnyServerName { + roots: RootCertStore, +} + +#[allow(unreachable_pub)] +impl WebPkiVerifierAnyServerName { + /// Constructs a new `WebPkiVerifierAnyServerName`. + /// + /// `roots` is the set of trust anchors to trust for issuing server certs. + pub fn new(roots: RootCertStore) -> Self { + Self { roots } + } +} diff --git a/io/zenoh-link-commons/src/unicast.rs b/io/zenoh-link-commons/src/unicast.rs index 1237024ca9..fe87e70e94 100644 --- a/io/zenoh-link-commons/src/unicast.rs +++ b/io/zenoh-link-commons/src/unicast.rs @@ -12,13 +12,13 @@ // ZettaScale Zenoh Team, // use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec}; -use async_std::net::SocketAddr; use async_trait::async_trait; use core::{ fmt, hash::{Hash, Hasher}, ops::Deref, }; +use std::net::SocketAddr; use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::ZResult; @@ -28,8 +28,8 @@ pub trait LinkManagerUnicastTrait: Send + Sync { async fn new_link(&self, endpoint: EndPoint) -> ZResult; async fn new_listener(&self, endpoint: EndPoint) -> ZResult; async fn del_listener(&self, endpoint: &EndPoint) -> ZResult<()>; - fn get_listeners(&self) -> Vec; - fn get_locators(&self) -> Vec; + async fn get_listeners(&self) -> Vec; + async fn get_locators(&self) -> Vec; } pub type NewLinkChannelSender = flume::Sender; pub trait ConstructibleLinkManagerUnicast: Sized { diff --git a/io/zenoh-link/Cargo.toml b/io/zenoh-link/Cargo.toml index 46d4cbb8bc..25d30903da 100644 --- a/io/zenoh-link/Cargo.toml +++ b/io/zenoh-link/Cargo.toml @@ -35,7 +35,6 @@ transport_serial = ["zenoh-link-serial"] transport_unixpipe = ["zenoh-link-unixpipe", "zenoh-link-unixpipe/transport_unixpipe"] [dependencies] -async-std = { workspace = true } async-trait = { workspace = true } rcgen = { workspace = true, optional = true } zenoh-config 
= { workspace = true } diff --git a/io/zenoh-link/src/lib.rs b/io/zenoh-link/src/lib.rs index 18a464cb93..0e3e5879a8 100644 --- a/io/zenoh-link/src/lib.rs +++ b/io/zenoh-link/src/lib.rs @@ -153,7 +153,7 @@ pub struct LinkConfigurator { impl LinkConfigurator { #[allow(unused_variables, unused_mut)] - pub async fn configurations( + pub fn configurations( &self, config: &Config, ) -> ( @@ -174,21 +174,21 @@ impl LinkConfigurator { { insert_config( QUIC_LOCATOR_PREFIX.into(), - self.quic_inspector.inspect_config(config).await, + self.quic_inspector.inspect_config(config), ); } #[cfg(feature = "transport_tls")] { insert_config( TLS_LOCATOR_PREFIX.into(), - self.tls_inspector.inspect_config(config).await, + self.tls_inspector.inspect_config(config), ); } #[cfg(feature = "transport_unixpipe")] { insert_config( UNIXPIPE_LOCATOR_PREFIX.into(), - self.unixpipe_inspector.inspect_config(config).await, + self.unixpipe_inspector.inspect_config(config), ); } (configs, errors) diff --git a/io/zenoh-links/zenoh-link-quic/Cargo.toml b/io/zenoh-links/zenoh-link-quic/Cargo.toml index 09debbaa54..496830b5ef 100644 --- a/io/zenoh-links/zenoh-link-quic/Cargo.toml +++ b/io/zenoh-links/zenoh-link-quic/Cargo.toml @@ -25,15 +25,17 @@ description = "Internal crate for zenoh." 
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -async-rustls = { workspace = true } -async-std = { workspace = true, features = ["unstable", "tokio1"] } async-trait = { workspace = true } +base64 = { workspace = true } futures = { workspace = true } log = { workspace = true } quinn = { workspace = true } -rustls = { workspace = true } rustls-native-certs = { workspace = true } rustls-pemfile = { workspace = true } +rustls-webpki = { workspace = true } +secrecy = {workspace = true } +tokio = { workspace = true, features = ["io-util", "net", "fs", "sync", "time"] } +tokio-util = { workspace = true, features = ["rt"] } zenoh-config = { workspace = true } zenoh-core = { workspace = true } zenoh-link-commons = { workspace = true } @@ -41,5 +43,8 @@ zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } zenoh-sync = { workspace = true } zenoh-util = { workspace = true } -base64 = { workspace = true } -secrecy = { workspace = true } +zenoh-runtime = { workspace = true } + +# Lock due to quinn not supporting rustls 0.22 yet +rustls = { version = "0.21", features = ["dangerous_configuration", "quic"] } +tokio-rustls = "0.24.1" diff --git a/io/zenoh-links/zenoh-link-quic/src/lib.rs b/io/zenoh-links/zenoh-link-quic/src/lib.rs index 4f268200a2..c6d7e16087 100644 --- a/io/zenoh-links/zenoh-link-quic/src/lib.rs +++ b/io/zenoh-links/zenoh-link-quic/src/lib.rs @@ -17,7 +17,6 @@ //! This crate is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use async_std::net::ToSocketAddrs; use async_trait::async_trait; use config::{ TLS_ROOT_CA_CERTIFICATE_BASE64, TLS_ROOT_CA_CERTIFICATE_FILE, TLS_SERVER_CERTIFICATE_BASE64, @@ -68,9 +67,8 @@ impl LocatorInspector for QuicLocatorInspector { #[derive(Default, Clone, Copy, Debug)] pub struct QuicConfigurator; -#[async_trait] impl ConfigurationInspector for QuicConfigurator { - async fn inspect_config(&self, config: &Config) -> ZResult { + fn inspect_config(&self, config: &Config) -> ZResult { let mut ps: Vec<(&str, &str)> = vec![]; let c = config.transport().link().tls(); @@ -168,7 +166,7 @@ pub mod config { } async fn get_quic_addr(address: &Address<'_>) -> ZResult { - match address.as_str().to_socket_addrs().await?.next() { + match tokio::net::lookup_host(address.as_str()).await?.next() { Some(addr) => Ok(addr), None => bail!("Couldn't resolve QUIC locator address: {}", address), } diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs index 366860801e..33953d666d 100644 --- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs @@ -17,19 +17,17 @@ use crate::{ config::*, get_quic_addr, verify::WebPkiVerifierAnyServerName, ALPN_QUIC_HTTP, QUIC_ACCEPT_THROTTLE_TIME, QUIC_DEFAULT_MTU, QUIC_LOCATOR_PREFIX, }; -use async_std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; -use async_std::prelude::FutureExt; -use async_std::sync::Mutex as AsyncMutex; -use async_std::task; use async_trait::async_trait; use rustls::{Certificate, PrivateKey}; use rustls_pemfile::Item; use std::fmt; use std::io::BufReader; use std::net::IpAddr; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use std::time::Duration; +use tokio::sync::Mutex as AsyncMutex; +use tokio_util::sync::CancellationToken; use zenoh_core::zasynclock; use zenoh_link_commons::{ get_ip_interface_names, 
LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, @@ -37,7 +35,6 @@ use zenoh_link_commons::{ }; use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{bail, zerror, ZError, ZResult}; -use zenoh_sync::Signal; pub struct LinkUnicastQuic { connection: quinn::Connection, @@ -230,7 +227,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { } else if let Some(b64_certificate) = epconf.get(TLS_ROOT_CA_CERTIFICATE_BASE64) { base64_decode(b64_certificate)? } else if let Some(value) = epconf.get(TLS_ROOT_CA_CERTIFICATE_FILE) { - async_std::fs::read(value) + tokio::fs::read(value) .await .map_err(|e| zerror!("Invalid QUIC CA certificate file: {}", e))? } else { @@ -322,7 +319,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { } else if let Some(b64_certificate) = epconf.get(TLS_SERVER_CERTIFICATE_BASE64) { base64_decode(b64_certificate)? } else if let Some(value) = epconf.get(TLS_SERVER_CERTIFICATE_FILE) { - async_std::fs::read(value) + tokio::fs::read(value) .await .map_err(|e| zerror!("Invalid QUIC CA certificate file: {}", e))? } else { @@ -342,17 +339,15 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { } else if let Some(b64_key) = epconf.get(TLS_SERVER_PRIVATE_KEY_BASE64) { base64_decode(b64_key)? } else if let Some(value) = epconf.get(TLS_SERVER_PRIVATE_KEY_FILE) { - async_std::fs::read(value) + tokio::fs::read(value) .await .map_err(|e| zerror!("Invalid QUIC CA certificate file: {}", e))? 
} else { bail!("No QUIC CA private key has been provided."); }; let items: Vec = rustls_pemfile::read_all(&mut BufReader::new(f.as_slice())) - .map(|result| { - result.map_err(|err| zerror!("Invalid QUIC CA private key file: {}", err)) - }) - .collect::, ZError>>()?; + .collect::>() + .map_err(|err| zerror!("Invalid QUIC CA private key file: {}", err))?; let private_key = items .into_iter() @@ -400,23 +395,19 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { endpoint.config(), )?; - let active = Arc::new(AtomicBool::new(true)); - let signal = Signal::new(); + // Spawn the accept loop for the listener + let token = self.listeners.token.child_token(); + let c_token = token.clone(); - let c_active = active.clone(); - let c_signal = signal.clone(); let c_manager = self.manager.clone(); - let handle = - task::spawn( - async move { accept_task(quic_endpoint, c_active, c_signal, c_manager).await }, - ); + let task = async move { accept_task(quic_endpoint, c_token, c_manager).await }; // Initialize the QuicAcceptor let locator = endpoint.to_locator(); self.listeners - .add_listener(endpoint, local_addr, active, signal, handle) + .add_listener(endpoint, local_addr, task, token) .await?; Ok(locator) @@ -428,27 +419,21 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { self.listeners.del_listener(addr).await } - fn get_listeners(&self) -> Vec { + async fn get_listeners(&self) -> Vec { self.listeners.get_endpoints() } - fn get_locators(&self) -> Vec { + async fn get_locators(&self) -> Vec { self.listeners.get_locators() } } async fn accept_task( endpoint: quinn::Endpoint, - active: Arc, - signal: Signal, + token: CancellationToken, manager: NewLinkChannelSender, ) -> ZResult<()> { - enum Action { - Accept(quinn::Connection), - Stop, - } - - async fn accept(acceptor: quinn::Accept<'_>) -> ZResult { + async fn accept(acceptor: quinn::Accept<'_>) -> ZResult { let qc = acceptor .await .ok_or_else(|| zerror!("Can not accept QUIC connections: acceptor 
closed"))?; @@ -459,12 +444,7 @@ async fn accept_task( e })?; - Ok(Action::Accept(conn)) - } - - async fn stop(signal: Signal) -> ZResult { - signal.wait().await; - Ok(Action::Stop) + Ok(conn) } let src_addr = endpoint @@ -473,51 +453,53 @@ // The accept future log::trace!("Ready to accept QUIC connections on: {:?}", src_addr); - while active.load(Ordering::Acquire) { - // Wait for incoming connections - let quic_conn = match accept(endpoint.accept()).race(stop(signal.clone())).await { - Ok(action) => match action { - Action::Accept(qc) => qc, - Action::Stop => break, - }, - Err(e) => { - log::warn!("{} Hint: increase the system open file limit.", e); - // Throttle the accept loop upon an error - // NOTE: This might be due to various factors. However, the most common case is that - // the process has reached the maximum number of open files in the system. On - // Linux systems this limit can be changed by using the "ulimit" command line - // tool. In case of systemd-based systems, this can be changed by using the - // "sysctl" command line tool. - task::sleep(Duration::from_micros(*QUIC_ACCEPT_THROTTLE_TIME)).await; - continue; - } - }; - // Get the bideractional streams. Note that we don't allow unidirectional streams. - let (send, recv) = match quic_conn.accept_bi().await { - Ok(stream) => stream, - Err(e) => { - log::warn!("QUIC connection has no streams: {:?}", e); - continue; + loop { + tokio::select! { + _ = token.cancelled() => break, + + res = accept(endpoint.accept()) => { + match res { + Ok(quic_conn) => { + // Get the bidirectional streams. Note that we don't allow unidirectional streams. 
+ let (send, recv) = match quic_conn.accept_bi().await { + Ok(stream) => stream, + Err(e) => { + log::warn!("QUIC connection has no streams: {:?}", e); + continue; + } + }; + + let dst_addr = quic_conn.remote_address(); + log::debug!("Accepted QUIC connection on {:?}: {:?}", src_addr, dst_addr); + // Create the new link object + let link = Arc::new(LinkUnicastQuic::new( + quic_conn, + src_addr, + Locator::new(QUIC_LOCATOR_PREFIX, dst_addr.to_string(), "")?, + send, + recv, + )); + + // Communicate the new link to the initial transport manager + if let Err(e) = manager.send_async(LinkUnicast(link)).await { + log::error!("{}-{}: {}", file!(), line!(), e) + } + + } + Err(e) => { + log::warn!("{} Hint: increase the system open file limit.", e); + // Throttle the accept loop upon an error + // NOTE: This might be due to various factors. However, the most common case is that + // the process has reached the maximum number of open files in the system. On + // Linux systems this limit can be changed by using the "ulimit" command line + // tool. In case of systemd-based systems, this can be changed by using the + // "sysctl" command line tool. 
+ tokio::time::sleep(Duration::from_micros(*QUIC_ACCEPT_THROTTLE_TIME)).await; + } + } } - }; - - let dst_addr = quic_conn.remote_address(); - log::debug!("Accepted QUIC connection on {:?}: {:?}", src_addr, dst_addr); - // Create the new link object - let link = Arc::new(LinkUnicastQuic::new( - quic_conn, - src_addr, - Locator::new(QUIC_LOCATOR_PREFIX, dst_addr.to_string(), "")?, - send, - recv, - )); - - // Communicate the new link to the initial transport manager - if let Err(e) = manager.send_async(LinkUnicast(link)).await { - log::error!("{}-{}: {}", file!(), line!(), e) } } - Ok(()) } diff --git a/io/zenoh-links/zenoh-link-quic/src/verify.rs b/io/zenoh-links/zenoh-link-quic/src/verify.rs index 6278e85109..baa7864246 100644 --- a/io/zenoh-links/zenoh-link-quic/src/verify.rs +++ b/io/zenoh-links/zenoh-link-quic/src/verify.rs @@ -1,10 +1,10 @@ -use async_rustls::rustls::{ - client::{ServerCertVerified, ServerCertVerifier}, - Certificate, RootCertStore, ServerName, -}; use rustls::client::verify_server_cert_signed_by_trust_anchor; use rustls::server::ParsedCertificate; use std::time::SystemTime; +use tokio_rustls::rustls::{ + client::{ServerCertVerified, ServerCertVerifier}, + Certificate, RootCertStore, ServerName, +}; impl ServerCertVerifier for WebPkiVerifierAnyServerName { /// Will verify the certificate is valid in the following ways: @@ -18,7 +18,7 @@ impl ServerCertVerifier for WebPkiVerifierAnyServerName { _scts: &mut dyn Iterator, _ocsp_response: &[u8], now: SystemTime, - ) -> Result { + ) -> Result { let cert = ParsedCertificate::try_from(end_entity)?; verify_server_cert_signed_by_trust_anchor(&cert, &self.roots, intermediates, now)?; Ok(ServerCertVerified::assertion()) diff --git a/io/zenoh-links/zenoh-link-serial/Cargo.toml b/io/zenoh-links/zenoh-link-serial/Cargo.toml index a48755e328..6fc3aba97e 100644 --- a/io/zenoh-links/zenoh-link-serial/Cargo.toml +++ b/io/zenoh-links/zenoh-link-serial/Cargo.toml @@ -18,12 +18,12 @@ version = { workspace = true } 
repository = { workspace = true } homepage = { workspace = true } authors = [ - "kydos ", - "Julien Enoch ", - "Olivier Hécart ", - "Luca Cominardi ", - "Pierre Avital ", - "Gabriele Baldoni " + "kydos ", + "Julien Enoch ", + "Olivier Hécart ", + "Luca Cominardi ", + "Pierre Avital ", + "Gabriele Baldoni " ] edition = { workspace = true } license = { workspace = true } @@ -32,11 +32,11 @@ description = "Internal crate for zenoh." # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -async-std = { workspace = true, features = ["unstable", "tokio1"] } async-trait = { workspace = true } futures = { workspace = true } log = { workspace = true } -tokio = { workspace = true, features = ["io-std", "macros", "net", "rt-multi-thread", "time", "io-util"] } +tokio = { workspace = true, features = ["io-std", "macros", "net", "rt-multi-thread", "time"] } +tokio-util = { workspace = true, features = ["rt"] } uuid = { workspace = true, default-features = true } z-serial = { workspace = true } zenoh-collections = { workspace = true } @@ -46,3 +46,4 @@ zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } zenoh-sync = { workspace = true } zenoh-util = { workspace = true } +zenoh-runtime = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-serial/src/unicast.rs b/io/zenoh-links/zenoh-link-serial/src/unicast.rs index fafac4c393..0efa40ee90 100644 --- a/io/zenoh-links/zenoh-link-serial/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-serial/src/unicast.rs @@ -12,25 +12,25 @@ // ZettaScale Zenoh Team, // -use async_std::prelude::*; -use async_std::sync::Mutex as AsyncMutex; -use async_std::task::JoinHandle; -use async_std::task::{self}; use async_trait::async_trait; use std::cell::UnsafeCell; use std::collections::HashMap; use std::fmt; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, RwLock}; +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, +}; use std::time::Duration; 
-use zenoh_core::{zasynclock, zread, zwrite}; +use tokio::sync::{Mutex as AsyncMutex, RwLock as AsyncRwLock}; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; +use zenoh_core::{zasynclock, zasyncread, zasyncwrite}; use zenoh_link_commons::{ ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{zerror, ZResult}; -use zenoh_sync::Signal; use z_serial::ZSerial; @@ -150,7 +150,7 @@ impl LinkUnicastTrait for LinkUnicastSerial { let e = zerror!("Read error on Serial link {}: {}", self, e); log::error!("{}", e); drop(_guard); - async_std::task::sleep(std::time::Duration::from_millis(1)).await; + tokio::time::sleep(std::time::Duration::from_millis(1)).await; continue; } } @@ -229,37 +229,34 @@ impl fmt::Debug for LinkUnicastSerial { /*************************************/ struct ListenerUnicastSerial { endpoint: EndPoint, - active: Arc, - signal: Signal, + token: CancellationToken, handle: JoinHandle>, } impl ListenerUnicastSerial { - fn new( - endpoint: EndPoint, - active: Arc, - signal: Signal, - handle: JoinHandle>, - ) -> Self { + fn new(endpoint: EndPoint, token: CancellationToken, handle: JoinHandle>) -> Self { Self { endpoint, - active, - signal, + token, handle, } } + + async fn stop(&self) { + self.token.cancel(); + } } pub struct LinkManagerUnicastSerial { manager: NewLinkChannelSender, - listeners: Arc>>, + listeners: Arc>>, } impl LinkManagerUnicastSerial { pub fn new(manager: NewLinkChannelSender) -> Self { Self { manager, - listeners: Arc::new(RwLock::new(HashMap::new())), + listeners: Arc::new(AsyncRwLock::new(HashMap::new())), } } } @@ -323,32 +320,25 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastSerial { )); // Spawn the accept loop for the listener - let active = Arc::new(AtomicBool::new(true)); - let signal = Signal::new(); - let mut listeners = zwrite!(self.listeners); + let token = 
CancellationToken::new(); + let c_token = token.clone(); + let mut listeners = zasyncwrite!(self.listeners); let c_path = path.clone(); - let c_active = active.clone(); - let c_signal = signal.clone(); let c_manager = self.manager.clone(); let c_listeners = self.listeners.clone(); - let handle = task::spawn(async move { + + let task = async move { // Wait for the accept loop to terminate - let res = accept_read_task( - link, - c_active, - c_signal, - c_manager, - c_path.clone(), - is_connected, - ) - .await; - zwrite!(c_listeners).remove(&c_path); + let res = + accept_read_task(link, c_token, c_manager, c_path.clone(), is_connected).await; + zasyncwrite!(c_listeners).remove(&c_path); res - }); + }; + let handle = zenoh_runtime::ZRuntime::Acceptor.spawn(task); let locator = endpoint.to_locator(); - let listener = ListenerUnicastSerial::new(endpoint, active, signal, handle); + let listener = ListenerUnicastSerial::new(endpoint, token, handle); // Update the list of active listeners on the manager listeners.insert(path, listener); @@ -359,7 +349,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastSerial { let path = get_unix_path_as_string(endpoint.address()); // Stop the listener - let listener = zwrite!(self.listeners).remove(&path).ok_or_else(|| { + let listener = zasyncwrite!(self.listeners).remove(&path).ok_or_else(|| { let e = zerror!( "Can not delete the Serial listener because it has not been found: {}", path @@ -369,20 +359,19 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastSerial { })?; // Send the stop signal - listener.active.store(false, Ordering::Release); - listener.signal.trigger(); - listener.handle.await + listener.stop().await; + listener.handle.await? 
} - fn get_listeners(&self) -> Vec { - zread!(self.listeners) + async fn get_listeners(&self) -> Vec { + zasyncread!(self.listeners) .values() .map(|l| l.endpoint.clone()) .collect() } - fn get_locators(&self) -> Vec { - zread!(self.listeners) + async fn get_locators(&self) -> Vec { + zasyncread!(self.listeners) .values() .map(|x| x.endpoint.to_locator()) .collect() @@ -391,72 +380,56 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastSerial { async fn accept_read_task( link: Arc, - active: Arc, - signal: Signal, + token: CancellationToken, manager: NewLinkChannelSender, src_path: String, is_connected: Arc, ) -> ZResult<()> { - enum Action { - Receive(Arc), - Stop, - } - - async fn stop(signal: Signal) -> ZResult { - signal.wait().await; - Ok(Action::Stop) - } - async fn receive( link: Arc, - active: Arc, src_path: String, is_connected: Arc, - ) -> ZResult { - while active.load(Ordering::Acquire) { - if !is_connected.load(Ordering::Acquire) { - if !link.is_ready() { - // Waiting to be ready, if not sleep some time. - task::sleep(Duration::from_micros(*SERIAL_ACCEPT_THROTTLE_TIME)).await; - continue; - } - - log::trace!("Creating serial link from {:?}", src_path); - - is_connected.store(true, Ordering::Release); - - return Ok(Action::Receive(link.clone())); - } + ) -> ZResult> { + while !is_connected.load(Ordering::Acquire) && !link.is_ready() { + // Waiting to be ready, if not sleep some time. 
+ tokio::time::sleep(Duration::from_micros(*SERIAL_ACCEPT_THROTTLE_TIME)).await; } - Ok(Action::Stop) + + log::trace!("Creating serial link from {:?}", src_path); + is_connected.store(true, Ordering::Release); + Ok(link.clone()) } log::trace!("Ready to accept Serial connections on: {:?}", src_path); loop { - match receive( - link.clone(), - active.clone(), - src_path.clone(), - is_connected.clone(), - ) - .race(stop(signal.clone())) - .await - { - Ok(action) => match action { - Action::Receive(link) => { - // Communicate the new link to the initial transport manager - if let Err(e) = manager.send_async(LinkUnicast(link.clone())).await { - log::error!("{}-{}: {}", file!(), line!(), e) + tokio::select! { + res = receive( + link.clone(), + src_path.clone(), + is_connected.clone(), + ) => { + match res { + Ok(link) => { + // Communicate the new link to the initial transport manager + if let Err(e) = manager.send_async(LinkUnicast(link.clone())).await { + log::error!("{}-{}: {}", file!(), line!(), e) + } + + // Ensure the creation of this link is only once + break; + } + Err(e) => { + log::warn!("{}. Hint: Is the serial cable connected?", e); + tokio::time::sleep(Duration::from_micros(*SERIAL_ACCEPT_THROTTLE_TIME)).await; + continue; + } } - Action::Stop => break Ok(()), }, - Err(e) => { - log::warn!("{}. Hint: Is the serial cable connected?", e); - task::sleep(Duration::from_micros(*SERIAL_ACCEPT_THROTTLE_TIME)).await; - continue; - } + + _ = token.cancelled() => break, } } + Ok(()) } diff --git a/io/zenoh-links/zenoh-link-tcp/Cargo.toml b/io/zenoh-links/zenoh-link-tcp/Cargo.toml index 9c4725ff03..b638f97443 100644 --- a/io/zenoh-links/zenoh-link-tcp/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tcp/Cargo.toml @@ -25,8 +25,9 @@ description = "Internal crate for zenoh." 
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -async-std = { workspace = true } async-trait = { workspace = true } +tokio = { workspace = true, features = ["net", "io-util", "rt", "time"] } +tokio-util = { workspace = true, features = ["rt"] } log = { workspace = true } zenoh-core = { workspace = true } zenoh-link-commons = { workspace = true } @@ -34,3 +35,4 @@ zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } zenoh-sync = { workspace = true } zenoh-util = { workspace = true } +zenoh-runtime = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-tcp/src/lib.rs b/io/zenoh-links/zenoh-link-tcp/src/lib.rs index 709b6a7b4b..1a7d6ae705 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/lib.rs @@ -17,7 +17,6 @@ //! This crate is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use async_std::net::ToSocketAddrs; use async_trait::async_trait; use std::net::SocketAddr; use zenoh_core::zconfigurable; @@ -65,9 +64,7 @@ zconfigurable! { } pub async fn get_tcp_addrs(address: Address<'_>) -> ZResult> { - let iter = address - .as_str() - .to_socket_addrs() + let iter = tokio::net::lookup_host(address.as_str().to_string()) .await .map_err(|e| zerror!("{}", e))? 
.filter(|x| !x.ip().is_multicast()); diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index b01d8be22e..5909b2ffe7 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -11,32 +11,31 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::net::{SocketAddr, TcpListener, TcpStream}; -use async_std::prelude::*; -use async_std::task; use async_trait::async_trait; +use std::cell::UnsafeCell; use std::convert::TryInto; use std::fmt; -use std::net::Shutdown; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tokio_util::sync::CancellationToken; use zenoh_link_commons::{ get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; -use zenoh_sync::Signal; use super::{ get_tcp_addrs, TCP_ACCEPT_THROTTLE_TIME, TCP_DEFAULT_MTU, TCP_LINGER_TIMEOUT, TCP_LOCATOR_PREFIX, }; +use tokio::net::{TcpListener, TcpSocket, TcpStream}; pub struct LinkUnicastTcp { - // The underlying socket as returned from the async-std library - socket: TcpStream, + // The underlying socket as returned from the tokio library + socket: UnsafeCell, // The source socket address of this link (address used on the local host) src_addr: SocketAddr, src_locator: Locator, @@ -45,6 +44,8 @@ pub struct LinkUnicastTcp { dst_locator: Locator, } +unsafe impl Sync for LinkUnicastTcp {} + impl LinkUnicastTcp { fn new(socket: TcpStream, src_addr: SocketAddr, dst_addr: SocketAddr) -> LinkUnicastTcp { // Set the TCP nodelay option @@ -58,12 +59,9 @@ impl LinkUnicastTcp { } // Set the TCP linger option - if let Err(err) = zenoh_util::net::set_linger( - &socket, - Some(Duration::from_secs( - 
(*TCP_LINGER_TIMEOUT).try_into().unwrap(), - )), - ) { + if let Err(err) = socket.set_linger(Some(Duration::from_secs( + (*TCP_LINGER_TIMEOUT).try_into().unwrap(), + ))) { log::warn!( "Unable to set LINGER option on TCP link {} => {}: {}", src_addr, @@ -74,13 +72,17 @@ impl LinkUnicastTcp { // Build the Tcp object LinkUnicastTcp { - socket, + socket: UnsafeCell::new(socket), src_addr, src_locator: Locator::new(TCP_LOCATOR_PREFIX, src_addr.to_string(), "").unwrap(), dst_addr, dst_locator: Locator::new(TCP_LOCATOR_PREFIX, dst_addr.to_string(), "").unwrap(), } } + #[allow(clippy::mut_from_ref)] + fn get_mut_socket(&self) -> &mut TcpStream { + unsafe { &mut *self.socket.get() } + } } #[async_trait] @@ -88,7 +90,7 @@ impl LinkUnicastTrait for LinkUnicastTcp { async fn close(&self) -> ZResult<()> { log::trace!("Closing TCP link: {}", self); // Close the underlying TCP socket - self.socket.shutdown(Shutdown::Both).map_err(|e| { + self.get_mut_socket().shutdown().await.map_err(|e| { let e = zerror!("TCP link shutdown {}: {:?}", self, e); log::trace!("{}", e); e.into() @@ -96,7 +98,7 @@ impl LinkUnicastTrait for LinkUnicastTcp { } async fn write(&self, buffer: &[u8]) -> ZResult { - (&self.socket).write(buffer).await.map_err(|e| { + self.get_mut_socket().write(buffer).await.map_err(|e| { let e = zerror!("Write error on TCP link {}: {}", self, e); log::trace!("{}", e); e.into() @@ -104,7 +106,7 @@ impl LinkUnicastTrait for LinkUnicastTcp { } async fn write_all(&self, buffer: &[u8]) -> ZResult<()> { - (&self.socket).write_all(buffer).await.map_err(|e| { + self.get_mut_socket().write_all(buffer).await.map_err(|e| { let e = zerror!("Write error on TCP link {}: {}", self, e); log::trace!("{}", e); e.into() @@ -112,7 +114,7 @@ impl LinkUnicastTrait for LinkUnicastTcp { } async fn read(&self, buffer: &mut [u8]) -> ZResult { - (&self.socket).read(buffer).await.map_err(|e| { + self.get_mut_socket().read(buffer).await.map_err(|e| { let e = zerror!("Read error on TCP link {}: {}", 
self, e); log::trace!("{}", e); e.into() @@ -120,11 +122,16 @@ impl LinkUnicastTrait for LinkUnicastTcp { } async fn read_exact(&self, buffer: &mut [u8]) -> ZResult<()> { - (&self.socket).read_exact(buffer).await.map_err(|e| { - let e = zerror!("Read error on TCP link {}: {}", self, e); - log::trace!("{}", e); - e.into() - }) + let _ = self + .get_mut_socket() + .read_exact(buffer) + .await + .map_err(|e| { + let e = zerror!("Read error on TCP link {}: {}", self, e); + log::trace!("{}", e); + e + })?; + Ok(()) } #[inline(always)] @@ -158,12 +165,17 @@ impl LinkUnicastTrait for LinkUnicastTcp { } } -impl Drop for LinkUnicastTcp { - fn drop(&mut self) { - // Close the underlying TCP socket - let _ = self.socket.shutdown(Shutdown::Both); - } -} +// // WARN: This sometimes causes timeout in routing test +// // WARN assume the drop of TcpStream would clean itself +// // https://docs.rs/tokio/latest/tokio/net/struct.TcpStream.html#method.into_split +// impl Drop for LinkUnicastTcp { +// fn drop(&mut self) { +// // Close the underlying TCP socket +// zenoh_runtime::ZRuntime::TX.block_in_place(async { +// let _ = self.get_mut_socket().shutdown().await; +// }); +// } +// } impl fmt::Display for LinkUnicastTcp { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -201,7 +213,17 @@ impl LinkManagerUnicastTcp { dst_addr: &SocketAddr, iface: Option<&str>, ) -> ZResult<(TcpStream, SocketAddr, SocketAddr)> { - let stream = TcpStream::connect(dst_addr) + let socket = match dst_addr { + SocketAddr::V4(_) => TcpSocket::new_v4(), + SocketAddr::V6(_) => TcpSocket::new_v6(), + }?; + + zenoh_util::net::set_bind_to_device_tcp_socket(&socket, iface)?; + + // Build a TcpStream from TcpSocket + // https://docs.rs/tokio/latest/tokio/net/struct.TcpSocket.html + let stream = socket + .connect(*dst_addr) .await .map_err(|e| zerror!("{}: {}", dst_addr, e))?; @@ -213,8 +235,6 @@ impl LinkManagerUnicastTcp { .peer_addr() .map_err(|e| zerror!("{}: {}", dst_addr, e))?; - 
zenoh_util::net::set_bind_to_device_tcp_stream(&stream, iface); - Ok((stream, src_addr, dst_addr)) } @@ -223,18 +243,27 @@ impl LinkManagerUnicastTcp { addr: &SocketAddr, iface: Option<&str>, ) -> ZResult<(TcpListener, SocketAddr)> { - // Bind the TCP socket - let socket = TcpListener::bind(addr) - .await + let socket = match addr { + SocketAddr::V4(_) => TcpSocket::new_v4(), + SocketAddr::V6(_) => TcpSocket::new_v6(), + }?; + + zenoh_util::net::set_bind_to_device_tcp_socket(&socket, iface)?; + + // Build a TcpListener from TcpSocket + // https://docs.rs/tokio/latest/tokio/net/struct.TcpSocket.html + socket.set_reuseaddr(true)?; + socket.bind(*addr).map_err(|e| zerror!("{}: {}", addr, e))?; + // backlog (the maximum number of pending connections are queued): 1024 + let listener = socket + .listen(1024) .map_err(|e| zerror!("{}: {}", addr, e))?; - zenoh_util::net::set_bind_to_device_tcp_listener(&socket, iface); - - let local_addr = socket + let local_addr = listener .local_addr() .map_err(|e| zerror!("{}: {}", addr, e))?; - Ok((socket, local_addr)) + Ok((listener, local_addr)) } } @@ -286,21 +315,15 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastTcp { endpoint.config(), )?; - let active = Arc::new(AtomicBool::new(true)); - let signal = Signal::new(); + let token = self.listeners.token.child_token(); + let c_token = token.clone(); - let c_active = active.clone(); - let c_signal = signal.clone(); let c_manager = self.manager.clone(); - - let handle = task::spawn(async move { - accept_task(socket, c_active, c_signal, c_manager).await - }); + let task = async move { accept_task(socket, c_token, c_manager).await }; let locator = endpoint.to_locator(); - self.listeners - .add_listener(endpoint, local_addr, active, signal, handle) + .add_listener(endpoint, local_addr, task, token) .await?; return Ok(locator); @@ -350,34 +373,23 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastTcp { Ok(()) } - fn get_listeners(&self) -> Vec { + async fn get_listeners(&self) -> 
Vec { self.listeners.get_endpoints() } - fn get_locators(&self) -> Vec { + async fn get_locators(&self) -> Vec { self.listeners.get_locators() } } async fn accept_task( socket: TcpListener, - active: Arc, - signal: Signal, + token: CancellationToken, manager: NewLinkChannelSender, ) -> ZResult<()> { - enum Action { - Accept((TcpStream, SocketAddr)), - Stop, - } - - async fn accept(socket: &TcpListener) -> ZResult { + async fn accept(socket: &TcpListener) -> ZResult<(TcpStream, SocketAddr)> { let res = socket.accept().await.map_err(|e| zerror!(e))?; - Ok(Action::Accept(res)) - } - - async fn stop(signal: Signal) -> ZResult { - signal.wait().await; - Ok(Action::Stop) + Ok(res) } let src_addr = socket.local_addr().map_err(|e| { @@ -387,34 +399,35 @@ async fn accept_task( })?; log::trace!("Ready to accept TCP connections on: {:?}", src_addr); - while active.load(Ordering::Acquire) { - // Wait for incoming connections - let (stream, dst_addr) = match accept(&socket).race(stop(signal.clone())).await { - Ok(action) => match action { - Action::Accept((stream, addr)) => (stream, addr), - Action::Stop => break, - }, - Err(e) => { - log::warn!("{}. Hint: increase the system open file limit.", e); - // Throttle the accept loop upon an error - // NOTE: This might be due to various factors. However, the most common case is that - // the process has reached the maximum number of open files in the system. On - // Linux systems this limit can be changed by using the "ulimit" command line - // tool. In case of systemd-based systems, this can be changed by using the - // "sysctl" command line tool. - task::sleep(Duration::from_micros(*TCP_ACCEPT_THROTTLE_TIME)).await; - continue; + loop { + tokio::select! 
{ + _ = token.cancelled() => break, + res = accept(&socket) => { + match res { + Ok((stream, dst_addr)) => { + log::debug!("Accepted TCP connection on {:?}: {:?}", src_addr, dst_addr); + // Create the new link object + let link = Arc::new(LinkUnicastTcp::new(stream, src_addr, dst_addr)); + + // Communicate the new link to the initial transport manager + if let Err(e) = manager.send_async(LinkUnicast(link)).await { + log::error!("{}-{}: {}", file!(), line!(), e) + } + }, + Err(e) => { + log::warn!("{}. Hint: increase the system open file limit.", e); + // Throttle the accept loop upon an error + // NOTE: This might be due to various factors. However, the most common case is that + // the process has reached the maximum number of open files in the system. On + // Linux systems this limit can be changed by using the "ulimit" command line + // tool. In case of systemd-based systems, this can be changed by using the + // "sysctl" command line tool. + tokio::time::sleep(Duration::from_micros(*TCP_ACCEPT_THROTTLE_TIME)).await; + } + + } } }; - - log::debug!("Accepted TCP connection on {:?}: {:?}", src_addr, dst_addr); - // Create the new link object - let link = Arc::new(LinkUnicastTcp::new(stream, src_addr, dst_addr)); - - // Communicate the new link to the initial transport manager - if let Err(e) = manager.send_async(LinkUnicast(link)).await { - log::error!("{}-{}: {}", file!(), line!(), e) - } } Ok(()) diff --git a/io/zenoh-links/zenoh-link-tls/Cargo.toml b/io/zenoh-links/zenoh-link-tls/Cargo.toml index 5d047b1160..975fa49467 100644 --- a/io/zenoh-links/zenoh-link-tls/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tls/Cargo.toml @@ -25,21 +25,24 @@ description = "Internal crate for zenoh." 
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -async-rustls = { workspace = true } -rustls = { workspace = true } -async-std = { workspace = true } async-trait = { workspace = true } +base64 = { workspace = true } futures = { workspace = true } log = { workspace = true } +rustls = { workspace = true } rustls-pemfile = { workspace = true } +rustls-pki-types = { workspace = true } rustls-webpki = { workspace = true } +secrecy = {workspace = true } +tokio = { workspace = true, features = ["io-util", "net", "fs", "sync"] } +tokio-rustls = { workspace = true } +tokio-util = { workspace = true, features = ["rt"] } webpki-roots = { workspace = true } zenoh-config = { workspace = true } zenoh-core = { workspace = true } zenoh-link-commons = { workspace = true } zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } +zenoh-runtime = { workspace = true } zenoh-sync = { workspace = true } zenoh-util = { workspace = true } -base64 = { workspace = true } -secrecy = {workspace = true } \ No newline at end of file diff --git a/io/zenoh-links/zenoh-link-tls/src/lib.rs b/io/zenoh-links/zenoh-link-tls/src/lib.rs index 9b45b5e68b..95d59104b4 100644 --- a/io/zenoh-links/zenoh-link-tls/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tls/src/lib.rs @@ -17,8 +17,6 @@ //! This crate is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use async_rustls::rustls::ServerName; -use async_std::net::ToSocketAddrs; use async_trait::async_trait; use config::{ TLS_CLIENT_AUTH, TLS_CLIENT_CERTIFICATE_BASE64, TLS_CLIENT_CERTIFICATE_FILE, @@ -26,6 +24,7 @@ use config::{ TLS_ROOT_CA_CERTIFICATE_FILE, TLS_SERVER_CERTIFICATE_BASE64, TLS_SERVER_CERTIFICATE_FILE, TLS_SERVER_NAME_VERIFICATION, TLS_SERVER_PRIVATE_KEY_BASE_64, TLS_SERVER_PRIVATE_KEY_FILE, }; +use rustls_pki_types::ServerName; use secrecy::ExposeSecret; use std::{convert::TryFrom, net::SocketAddr}; use zenoh_config::Config; @@ -38,7 +37,6 @@ use zenoh_protocol::core::{ use zenoh_result::{bail, zerror, ZResult}; mod unicast; -mod verify; pub use unicast::*; // Default MTU (TLS PDU) in bytes. @@ -65,9 +63,8 @@ impl LocatorInspector for TlsLocatorInspector { #[derive(Default, Clone, Copy, Debug)] pub struct TlsConfigurator; -#[async_trait] impl ConfigurationInspector for TlsConfigurator { - async fn inspect_config(&self, config: &Config) -> ZResult { + fn inspect_config(&self, config: &Config) -> ZResult { let mut ps: Vec<(&str, &str)> = vec![]; let c = config.transport().link().tls(); @@ -213,7 +210,7 @@ pub mod config { } pub async fn get_tls_addr(address: &Address<'_>) -> ZResult { - match address.as_str().to_socket_addrs().await?.next() { + match tokio::net::lookup_host(address.as_str()).await?.next() { Some(addr) => Ok(addr), None => bail!("Couldn't resolve TLS locator address: {}", address), } @@ -227,7 +224,7 @@ pub fn get_tls_host<'a>(address: &'a Address<'a>) -> ZResult<&'a str> { .ok_or_else(|| zerror!("Invalid TLS address").into()) } -pub fn get_tls_server_name(address: &Address<'_>) -> ZResult { +pub fn get_tls_server_name<'a>(address: &'a Address<'a>) -> ZResult> { Ok(ServerName::try_from(get_tls_host(address)?).map_err(|e| zerror!(e))?) 
} diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index e3adea2dff..7da711161e 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -13,38 +13,31 @@ // use crate::{ base64_decode, config::*, get_tls_addr, get_tls_host, get_tls_server_name, - verify::WebPkiVerifierAnyServerName, TLS_ACCEPT_THROTTLE_TIME, TLS_DEFAULT_MTU, - TLS_LINGER_TIMEOUT, TLS_LOCATOR_PREFIX, + TLS_ACCEPT_THROTTLE_TIME, TLS_DEFAULT_MTU, TLS_LINGER_TIMEOUT, TLS_LOCATOR_PREFIX, }; -use async_rustls::{ - rustls::{ - server::AllowAnyAuthenticatedClient, version::TLS13, Certificate, ClientConfig, - OwnedTrustAnchor, PrivateKey, RootCertStore, ServerConfig, - }, - TlsAcceptor, TlsConnector, TlsStream, -}; -use async_std::fs; -use async_std::net::{SocketAddr, TcpListener, TcpStream}; -use async_std::prelude::FutureExt; -use async_std::sync::Mutex as AsyncMutex; -use async_std::task; use async_trait::async_trait; -use futures::io::AsyncReadExt; -use futures::io::AsyncWriteExt; +use rustls::{ + pki_types::{CertificateDer, PrivateKeyDer, TrustAnchor}, + server::WebPkiClientVerifier, + version::TLS13, + ClientConfig, RootCertStore, ServerConfig, +}; use std::convert::TryInto; use std::fmt; use std::fs::File; use std::io::{BufReader, Cursor}; -use std::net::Shutdown; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; use std::{cell::UnsafeCell, io}; -use webpki::{ - anchor_from_trusted_cert, - types::{CertificateDer, TrustAnchor}, -}; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tokio::net::{TcpListener, TcpStream}; +use tokio::sync::Mutex as AsyncMutex; +use tokio_rustls::{TlsAcceptor, TlsConnector, TlsStream}; +use tokio_util::sync::CancellationToken; +use webpki::anchor_from_trusted_cert; use zenoh_core::zasynclock; +use zenoh_link_commons::tls::WebPkiVerifierAnyServerName; use zenoh_link_commons::{ 
get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, @@ -52,7 +45,6 @@ use zenoh_link_commons::{ use zenoh_protocol::core::endpoint::Config; use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{bail, zerror, ZError, ZResult}; -use zenoh_sync::Signal; pub struct LinkUnicastTls { // The underlying socket as returned from the async-rustls library @@ -96,12 +88,9 @@ impl LinkUnicastTls { } // Set the TLS linger option - if let Err(err) = zenoh_util::net::set_linger( - tcp_stream, - Some(Duration::from_secs( - (*TLS_LINGER_TIMEOUT).try_into().unwrap(), - )), - ) { + if let Err(err) = tcp_stream.set_linger(Some(Duration::from_secs( + (*TLS_LINGER_TIMEOUT).try_into().unwrap(), + ))) { log::warn!( "Unable to set LINGER option on TLS link {} => {}: {}", src_addr, @@ -141,8 +130,8 @@ impl LinkUnicastTrait for LinkUnicastTls { let res = tls_stream.flush().await; log::trace!("TLS link flush {}: {:?}", self, res); // Close the underlying TCP stream - let (tcp_stream, _) = tls_stream.get_ref(); - let res = tcp_stream.shutdown(Shutdown::Both); + let (tcp_stream, _) = tls_stream.get_mut(); + let res = tcp_stream.shutdown().await; log::trace!("TLS link shutdown {}: {:?}", self, res); res.map_err(|e| zerror!(e).into()) } @@ -173,10 +162,11 @@ impl LinkUnicastTrait for LinkUnicastTls { async fn read_exact(&self, buffer: &mut [u8]) -> ZResult<()> { let _guard = zasynclock!(self.read_mtx); - self.get_sock_mut().read_exact(buffer).await.map_err(|e| { + let _ = self.get_sock_mut().read_exact(buffer).await.map_err(|e| { log::trace!("Read error on TLS link {}: {}", self, e); - zerror!(e).into() - }) + zerror!(e) + })?; + Ok(()) } #[inline(always)] @@ -213,8 +203,9 @@ impl LinkUnicastTrait for LinkUnicastTls { impl Drop for LinkUnicastTls { fn drop(&mut self) { // Close the underlying TCP stream - let (tcp_stream, _) = self.get_sock_mut().get_ref(); - let _ = tcp_stream.shutdown(Shutdown::Both); + let 
(tcp_stream, _) = self.get_sock_mut().get_mut(); + let _ = + zenoh_runtime::ZRuntime::TX.block_in_place(async move { tcp_stream.shutdown().await }); } } @@ -331,16 +322,11 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastTls { // Initialize the TlsAcceptor let acceptor = TlsAcceptor::from(Arc::new(tls_server_config.server_config)); - let active = Arc::new(AtomicBool::new(true)); - let signal = Signal::new(); - - let c_active = active.clone(); - let c_signal = signal.clone(); + let token = self.listeners.token.child_token(); + let c_token = token.clone(); let c_manager = self.manager.clone(); - let handle = task::spawn(async move { - accept_task(socket, acceptor, c_active, c_signal, c_manager).await - }); + let task = async move { accept_task(socket, acceptor, c_token, c_manager).await }; // Update the endpoint locator address let locator = Locator::new( @@ -350,7 +336,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastTls { )?; self.listeners - .add_listener(endpoint, local_addr, active, signal, handle) + .add_listener(endpoint, local_addr, task, token) .await?; Ok(locator) @@ -362,11 +348,11 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastTls { self.listeners.del_listener(addr).await } - fn get_listeners(&self) -> Vec { + async fn get_listeners(&self) -> Vec { self.listeners.get_endpoints() } - fn get_locators(&self) -> Vec { + async fn get_locators(&self) -> Vec { self.listeners.get_locators() } } @@ -374,23 +360,12 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastTls { async fn accept_task( socket: TcpListener, acceptor: TlsAcceptor, - active: Arc, - signal: Signal, + token: CancellationToken, manager: NewLinkChannelSender, ) -> ZResult<()> { - enum Action { - Accept((TcpStream, SocketAddr)), - Stop, - } - - async fn accept(socket: &TcpListener) -> ZResult { + async fn accept(socket: &TcpListener) -> ZResult<(TcpStream, SocketAddr)> { let res = socket.accept().await.map_err(|e| zerror!(e))?; - Ok(Action::Accept(res)) - } - - async fn 
stop(signal: Signal) -> ZResult { - signal.wait().await; - Ok(Action::Stop) + Ok(res) } let src_addr = socket.local_addr().map_err(|e| { @@ -400,42 +375,44 @@ async fn accept_task( })?; log::trace!("Ready to accept TLS connections on: {:?}", src_addr); - while active.load(Ordering::Acquire) { - // Wait for incoming connections - let (tcp_stream, dst_addr) = match accept(&socket).race(stop(signal.clone())).await { - Ok(action) => match action { - Action::Accept((tcp_stream, dst_addr)) => (tcp_stream, dst_addr), - Action::Stop => break, - }, - Err(e) => { - log::warn!("{}. Hint: increase the system open file limit.", e); - // Throttle the accept loop upon an error - // NOTE: This might be due to various factors. However, the most common case is that - // the process has reached the maximum number of open files in the system. On - // Linux systems this limit can be changed by using the "ulimit" command line - // tool. In case of systemd-based systems, this can be changed by using the - // "sysctl" command line tool. - task::sleep(Duration::from_micros(*TLS_ACCEPT_THROTTLE_TIME)).await; - continue; - } - }; - // Accept the TLS connection - let tls_stream = match acceptor.accept(tcp_stream).await { - Ok(stream) => TlsStream::Server(stream), - Err(e) => { - let e = format!("Can not accept TLS connection: {e}"); - log::warn!("{}", e); - continue; + loop { + tokio::select! 
{ + _ = token.cancelled() => break, + + res = accept(&socket) => { + match res { + Ok((tcp_stream, dst_addr)) => { + // Accept the TLS connection + let tls_stream = match acceptor.accept(tcp_stream).await { + Ok(stream) => TlsStream::Server(stream), + Err(e) => { + let e = format!("Can not accept TLS connection: {e}"); + log::warn!("{}", e); + continue; + } + }; + + log::debug!("Accepted TLS connection on {:?}: {:?}", src_addr, dst_addr); + // Create the new link object + let link = Arc::new(LinkUnicastTls::new(tls_stream, src_addr, dst_addr)); + + // Communicate the new link to the initial transport manager + if let Err(e) = manager.send_async(LinkUnicast(link)).await { + log::error!("{}-{}: {}", file!(), line!(), e) + } + } + Err(e) => { + log::warn!("{}. Hint: increase the system open file limit.", e); + // Throttle the accept loop upon an error + // NOTE: This might be due to various factors. However, the most common case is that + // the process has reached the maximum number of open files in the system. On + // Linux systems this limit can be changed by using the "ulimit" command line + // tool. In case of systemd-based systems, this can be changed by using the + // "sysctl" command line tool. 
+ tokio::time::sleep(Duration::from_micros(*TLS_ACCEPT_THROTTLE_TIME)).await; + } + } } - }; - - log::debug!("Accepted TLS connection on {:?}: {:?}", src_addr, dst_addr); - // Create the new link object - let link = Arc::new(LinkUnicastTls::new(tls_stream, src_addr, dst_addr)); - - // Communicate the new link to the initial transport manager - if let Err(e) = manager.send_async(LinkUnicast(link)).await { - log::error!("{}-{}: {}", file!(), line!(), e) } } @@ -457,42 +434,29 @@ impl TlsServerConfig { let tls_server_private_key = TlsServerConfig::load_tls_private_key(config).await?; let tls_server_certificate = TlsServerConfig::load_tls_certificate(config).await?; - let certs: Vec = + let certs: Vec = rustls_pemfile::certs(&mut Cursor::new(&tls_server_certificate)) - .map(|result| { - result - .map_err(|err| zerror!("Error processing server certificate: {err}.")) - .map(|der| Certificate(der.to_vec())) - }) - .collect::, ZError>>()?; - - let mut keys: Vec = + .collect::>() + .map_err(|err| zerror!("Error processing server certificate: {err}."))?; + + let mut keys: Vec = rustls_pemfile::rsa_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map(|result| { - result - .map_err(|err| zerror!("Error processing server key: {err}.")) - .map(|key| PrivateKey(key.secret_pkcs1_der().to_vec())) - }) - .collect::, ZError>>()?; + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing server key: {err}."))?; if keys.is_empty() { keys = rustls_pemfile::pkcs8_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map(|result| { - result - .map_err(|err| zerror!("Error processing server key: {err}.")) - .map(|key| PrivateKey(key.secret_pkcs8_der().to_vec())) - }) - .collect::, ZError>>()?; + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing server key: {err}."))?; } if keys.is_empty() { keys = rustls_pemfile::ec_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map(|result| { - 
result - .map_err(|err| zerror!("Error processing server key: {err}.")) - .map(|key| PrivateKey(key.secret_sec1_der().to_vec())) - }) - .collect::, ZError>>()?; + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing server key: {err}."))?; } if keys.is_empty() { @@ -508,17 +472,13 @@ impl TlsServerConfig { }, Ok, )?; - ServerConfig::builder() - .with_safe_default_cipher_suites() - .with_safe_default_kx_groups() - .with_protocol_versions(&[&TLS13]) // Force TLS 1.3 - .map_err(|e| zerror!(e))? - .with_client_cert_verifier(Arc::new(AllowAnyAuthenticatedClient::new(root_cert_store))) + let client_auth = WebPkiClientVerifier::builder(root_cert_store.into()).build()?; + ServerConfig::builder_with_protocol_versions(&[&TLS13]) + .with_client_cert_verifier(client_auth) .with_single_cert(certs, keys.remove(0)) .map_err(|e| zerror!(e))? } else { ServerConfig::builder() - .with_safe_defaults() .with_no_client_auth() .with_single_cert(certs, keys.remove(0)) .map_err(|e| zerror!(e))? @@ -575,13 +535,13 @@ impl TlsClientConfig { // Allows mixed user-generated CA and webPKI CA log::debug!("Loading default Web PKI certificates."); - let mut root_cert_store: RootCertStore = RootCertStore { - roots: load_default_webpki_certs().roots, + let mut root_cert_store = RootCertStore { + roots: webpki_roots::TLS_SERVER_ROOTS.to_vec(), }; if let Some(custom_root_cert) = load_trust_anchors(config)? 
{ log::debug!("Loading user-generated certificates."); - root_cert_store.add_trust_anchors(custom_root_cert.roots.into_iter()); + root_cert_store.extend(custom_root_cert.roots); } let cc = if tls_client_server_auth { @@ -589,54 +549,37 @@ impl TlsClientConfig { let tls_client_private_key = TlsClientConfig::load_tls_private_key(config).await?; let tls_client_certificate = TlsClientConfig::load_tls_certificate(config).await?; - let certs: Vec = + let certs: Vec = rustls_pemfile::certs(&mut Cursor::new(&tls_client_certificate)) - .map(|result| { - result - .map_err(|err| zerror!("Error processing client certificate: {err}.")) - .map(|der| Certificate(der.to_vec())) - }) - .collect::, ZError>>()?; - - let mut keys: Vec = + .collect::>() + .map_err(|err| zerror!("Error processing client certificate: {err}."))?; + + let mut keys: Vec = rustls_pemfile::rsa_private_keys(&mut Cursor::new(&tls_client_private_key)) - .map(|result| { - result - .map_err(|err| zerror!("Error processing client key: {err}.")) - .map(|key| PrivateKey(key.secret_pkcs1_der().to_vec())) - }) - .collect::, ZError>>()?; + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing client key: {err}."))?; if keys.is_empty() { keys = rustls_pemfile::pkcs8_private_keys(&mut Cursor::new(&tls_client_private_key)) - .map(|result| { - result - .map_err(|err| zerror!("Error processing client key: {err}.")) - .map(|key| PrivateKey(key.secret_pkcs8_der().to_vec())) - }) - .collect::, ZError>>()?; + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing client key: {err}."))?; } if keys.is_empty() { keys = rustls_pemfile::ec_private_keys(&mut Cursor::new(&tls_client_private_key)) - .map(|result| { - result - .map_err(|err| zerror!("Error processing client key: {err}.")) - .map(|key| PrivateKey(key.secret_sec1_der().to_vec())) - }) - .collect::, ZError>>()?; + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| 
zerror!("Error processing client key: {err}."))?; } if keys.is_empty() { bail!("No private key found for TLS client."); } - let builder = ClientConfig::builder() - .with_safe_default_cipher_suites() - .with_safe_default_kx_groups() - .with_protocol_versions(&[&TLS13]) - .map_err(|e| zerror!("Config parameters should be valid: {}", e))?; + let builder = ClientConfig::builder_with_protocol_versions(&[&TLS13]); if tls_server_name_verification { builder @@ -644,6 +587,7 @@ impl TlsClientConfig { .with_client_auth_cert(certs, keys.remove(0)) } else { builder + .dangerous() .with_custom_certificate_verifier(Arc::new(WebPkiVerifierAnyServerName::new( root_cert_store, ))) @@ -651,13 +595,14 @@ impl TlsClientConfig { } .map_err(|e| zerror!("Bad certificate/key: {}", e))? } else { - let builder = ClientConfig::builder().with_safe_defaults(); + let builder = ClientConfig::builder(); if tls_server_name_verification { builder .with_root_certificates(root_cert_store) .with_no_client_auth() } else { builder + .dangerous() .with_custom_certificate_verifier(Arc::new(WebPkiVerifierAnyServerName::new( root_cert_store, ))) @@ -696,10 +641,14 @@ async fn load_tls_key( ) -> ZResult> { if let Some(value) = config.get(tls_private_key_raw_config_key) { return Ok(value.as_bytes().to_vec()); - } else if let Some(b64_key) = config.get(tls_private_key_base64_config_key) { + } + + if let Some(b64_key) = config.get(tls_private_key_base64_config_key) { return base64_decode(b64_key); - } else if let Some(value) = config.get(tls_private_key_file_config_key) { - return Ok(fs::read(value) + } + + if let Some(value) = config.get(tls_private_key_file_config_key) { + return Ok(tokio::fs::read(value) .await .map_err(|e| zerror!("Invalid TLS private key file: {}", e))?) 
.and_then(|result| { @@ -721,10 +670,14 @@ async fn load_tls_certificate( ) -> ZResult> { if let Some(value) = config.get(tls_certificate_raw_config_key) { return Ok(value.as_bytes().to_vec()); - } else if let Some(b64_certificate) = config.get(tls_certificate_base64_config_key) { + } + + if let Some(b64_certificate) = config.get(tls_certificate_base64_config_key) { return base64_decode(b64_certificate); - } else if let Some(value) = config.get(tls_certificate_file_config_key) { - return Ok(fs::read(value) + } + + if let Some(value) = config.get(tls_certificate_file_config_key) { + return Ok(tokio::fs::read(value) .await .map_err(|e| zerror!("Invalid TLS certificate file: {}", e))?); } @@ -736,7 +689,7 @@ fn load_trust_anchors(config: &Config<'_>) -> ZResult> { if let Some(value) = config.get(TLS_ROOT_CA_CERTIFICATE_RAW) { let mut pem = BufReader::new(value.as_bytes()); let trust_anchors = process_pem(&mut pem)?; - root_cert_store.add_trust_anchors(trust_anchors.into_iter()); + root_cert_store.extend(trust_anchors); return Ok(Some(root_cert_store)); } @@ -744,20 +697,20 @@ fn load_trust_anchors(config: &Config<'_>) -> ZResult> { let certificate_pem = base64_decode(b64_certificate)?; let mut pem = BufReader::new(certificate_pem.as_slice()); let trust_anchors = process_pem(&mut pem)?; - root_cert_store.add_trust_anchors(trust_anchors.into_iter()); + root_cert_store.extend(trust_anchors); return Ok(Some(root_cert_store)); } if let Some(filename) = config.get(TLS_ROOT_CA_CERTIFICATE_FILE) { let mut pem = BufReader::new(File::open(filename)?); let trust_anchors = process_pem(&mut pem)?; - root_cert_store.add_trust_anchors(trust_anchors.into_iter()); + root_cert_store.extend(trust_anchors); return Ok(Some(root_cert_store)); } Ok(None) } -fn process_pem(pem: &mut dyn io::BufRead) -> ZResult> { +fn process_pem(pem: &mut dyn io::BufRead) -> ZResult>> { let certs: Vec = rustls_pemfile::certs(pem) .map(|result| result.map_err(|err| zerror!("Error processing PEM certificates: 
{err}."))) .collect::, ZError>>()?; @@ -771,28 +724,5 @@ fn process_pem(pem: &mut dyn io::BufRead) -> ZResult> { }) .collect::, ZError>>()?; - let owned_trust_anchors: Vec = trust_anchors - .into_iter() - .map(|ta| { - OwnedTrustAnchor::from_subject_spki_name_constraints( - ta.subject.to_vec(), - ta.subject_public_key_info.to_vec(), - ta.name_constraints.map(|x| x.to_vec()), - ) - }) - .collect(); - - Ok(owned_trust_anchors) -} - -fn load_default_webpki_certs() -> RootCertStore { - let mut root_cert_store = RootCertStore::empty(); - root_cert_store.add_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| { - OwnedTrustAnchor::from_subject_spki_name_constraints( - ta.subject.to_vec(), - ta.subject_public_key_info.to_vec(), - ta.name_constraints.clone().map(|x| x.to_vec()), - ) - })); - root_cert_store + Ok(trust_anchors) } diff --git a/io/zenoh-links/zenoh-link-tls/src/verify.rs b/io/zenoh-links/zenoh-link-tls/src/verify.rs deleted file mode 100644 index 6278e85109..0000000000 --- a/io/zenoh-links/zenoh-link-tls/src/verify.rs +++ /dev/null @@ -1,42 +0,0 @@ -use async_rustls::rustls::{ - client::{ServerCertVerified, ServerCertVerifier}, - Certificate, RootCertStore, ServerName, -}; -use rustls::client::verify_server_cert_signed_by_trust_anchor; -use rustls::server::ParsedCertificate; -use std::time::SystemTime; - -impl ServerCertVerifier for WebPkiVerifierAnyServerName { - /// Will verify the certificate is valid in the following ways: - /// - Signed by a trusted `RootCertStore` CA - /// - Not Expired - fn verify_server_cert( - &self, - end_entity: &Certificate, - intermediates: &[Certificate], - _server_name: &ServerName, - _scts: &mut dyn Iterator, - _ocsp_response: &[u8], - now: SystemTime, - ) -> Result { - let cert = ParsedCertificate::try_from(end_entity)?; - verify_server_cert_signed_by_trust_anchor(&cert, &self.roots, intermediates, now)?; - Ok(ServerCertVerified::assertion()) - } -} - -/// `ServerCertVerifier` that verifies that the server is signed 
by a trusted root, but allows any serverName -/// see the trait impl for more information. -pub struct WebPkiVerifierAnyServerName { - roots: RootCertStore, -} - -#[allow(unreachable_pub)] -impl WebPkiVerifierAnyServerName { - /// Constructs a new `WebPkiVerifierAnyServerName`. - /// - /// `roots` is the set of trust anchors to trust for issuing server certs. - pub fn new(roots: RootCertStore) -> Self { - Self { roots } - } -} diff --git a/io/zenoh-links/zenoh-link-udp/Cargo.toml b/io/zenoh-links/zenoh-link-udp/Cargo.toml index aae1b01f54..bcc0f16ee4 100644 --- a/io/zenoh-links/zenoh-link-udp/Cargo.toml +++ b/io/zenoh-links/zenoh-link-udp/Cargo.toml @@ -25,7 +25,8 @@ description = "Internal crate for zenoh." # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -async-std = { workspace = true } +tokio = { workspace = true, features = ["net", "io-util", "rt", "time"] } +tokio-util = { workspace = true, features = ["rt"] } async-trait = { workspace = true } log = { workspace = true } socket2 = { workspace = true } @@ -37,3 +38,4 @@ zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } zenoh-sync = { workspace = true } zenoh-util = { workspace = true } +zenoh-runtime = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-udp/src/lib.rs b/io/zenoh-links/zenoh-link-udp/src/lib.rs index 20a48e8f4d..91d02cc13d 100644 --- a/io/zenoh-links/zenoh-link-udp/src/lib.rs +++ b/io/zenoh-links/zenoh-link-udp/src/lib.rs @@ -20,7 +20,6 @@ mod multicast; mod unicast; -use async_std::net::ToSocketAddrs; use async_trait::async_trait; pub use multicast::*; use std::net::SocketAddr; @@ -90,9 +89,7 @@ pub mod config { } pub async fn get_udp_addrs(address: Address<'_>) -> ZResult> { - let iter = address - .as_str() - .to_socket_addrs() + let iter = tokio::net::lookup_host(address.as_str().to_string()) .await .map_err(|e| zerror!("{}", e))?; Ok(iter) diff --git 
a/io/zenoh-links/zenoh-link-udp/src/multicast.rs b/io/zenoh-links/zenoh-link-udp/src/multicast.rs index 497120ed0d..bc894bd296 100644 --- a/io/zenoh-links/zenoh-link-udp/src/multicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/multicast.rs @@ -13,11 +13,12 @@ // use super::{config::*, UDP_DEFAULT_MTU}; use crate::{get_udp_addrs, socket_addr_to_udp_locator}; -use async_std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}; use async_trait::async_trait; use socket2::{Domain, Protocol, Socket, Type}; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use std::{borrow::Cow, fmt}; +use tokio::net::UdpSocket; use zenoh_link_commons::{LinkManagerMulticastTrait, LinkMulticast, LinkMulticastTrait}; use zenoh_protocol::core::{Config, EndPoint, Locator}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; @@ -228,7 +229,10 @@ impl LinkManagerMulticastUdp { .bind(&SocketAddr::new(local_addr, 0).into()) .map_err(|e| zerror!("{}: {}", mcast_addr, e))?; - let ucast_sock: UdpSocket = std::net::UdpSocket::from(ucast_sock).into(); + // Must set to nonblocking according to the doc of tokio + // https://docs.rs/tokio/latest/tokio/net/struct.UdpSocket.html#notes + ucast_sock.set_nonblocking(true)?; + let ucast_sock = UdpSocket::from_std(ucast_sock.into())?; // Establish a multicast UDP socket let mcast_sock = Socket::new(domain, Type::DGRAM, Some(Protocol::UDP)) @@ -288,8 +292,12 @@ impl LinkManagerMulticastUdp { } }; - // Build the async_std multicast UdpSocket - let mcast_sock: UdpSocket = std::net::UdpSocket::from(mcast_sock).into(); + // Must set to nonblocking according to the doc of tokio + // https://docs.rs/tokio/latest/tokio/net/struct.UdpSocket.html#notes + mcast_sock.set_nonblocking(true)?; + + // Build the tokio multicast UdpSocket + let mcast_sock = UdpSocket::from_std(mcast_sock.into())?; let ucast_addr = ucast_sock .local_addr() diff --git a/io/zenoh-links/zenoh-link-udp/src/unicast.rs 
b/io/zenoh-links/zenoh-link-udp/src/unicast.rs index d5214510be..0862928e1a 100644 --- a/io/zenoh-links/zenoh-link-udp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/unicast.rs @@ -15,16 +15,15 @@ use super::{ get_udp_addrs, socket_addr_to_udp_locator, UDP_ACCEPT_THROTTLE_TIME, UDP_DEFAULT_MTU, UDP_MAX_MTU, }; -use async_std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}; -use async_std::prelude::*; -use async_std::sync::Mutex as AsyncMutex; -use async_std::task; use async_trait::async_trait; use std::collections::HashMap; use std::fmt; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::{Arc, Mutex, Weak}; use std::time::Duration; +use tokio::net::UdpSocket; +use tokio::sync::Mutex as AsyncMutex; +use tokio_util::sync::CancellationToken; use zenoh_core::{zasynclock, zlock}; use zenoh_link_commons::{ get_ip_interface_names, ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, @@ -33,7 +32,6 @@ use zenoh_link_commons::{ use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; use zenoh_sync::Mvar; -use zenoh_sync::Signal; type LinkHashMap = Arc>>>; type LinkInput = (Vec, usize); @@ -279,7 +277,7 @@ impl LinkManagerUnicastUdp { e })?; - zenoh_util::net::set_bind_to_device_udp_socket(&socket, iface); + zenoh_util::net::set_bind_to_device_udp_socket(&socket, iface)?; // Connect the socket to the remote address socket.connect(dst_addr).await.map_err(|e| { @@ -316,7 +314,7 @@ impl LinkManagerUnicastUdp { e })?; - zenoh_util::net::set_bind_to_device_udp_socket(&socket, iface); + zenoh_util::net::set_bind_to_device_udp_socket(&socket, iface)?; let local_addr = socket.local_addr().map_err(|e| { let e = zerror!("Can not create a new UDP listener on {}: {}", addr, e); @@ -388,20 +386,15 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUdp { endpoint.config(), )?; - let active = Arc::new(AtomicBool::new(true)); - let signal = 
Signal::new(); - - let c_active = active.clone(); - let c_signal = signal.clone(); + let token = self.listeners.token.child_token(); + let c_token = token.clone(); let c_manager = self.manager.clone(); - let handle = task::spawn(async move { - accept_read_task(socket, c_active, c_signal, c_manager).await - }); + let task = async move { accept_read_task(socket, c_token, c_manager).await }; let locator = endpoint.to_locator(); self.listeners - .add_listener(endpoint, local_addr, active, signal, handle) + .add_listener(endpoint, local_addr, task, token) .await?; return Ok(locator); @@ -453,19 +446,18 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUdp { Ok(()) } - fn get_listeners(&self) -> Vec { + async fn get_listeners(&self) -> Vec { self.listeners.get_endpoints() } - fn get_locators(&self) -> Vec { + async fn get_locators(&self) -> Vec { self.listeners.get_locators() } } async fn accept_read_task( socket: UdpSocket, - active: Arc, - signal: Signal, + token: CancellationToken, manager: NewLinkChannelSender, ) -> ZResult<()> { let socket = Arc::new(socket); @@ -489,19 +481,9 @@ async fn accept_read_task( }; } - enum Action { - Receive((usize, SocketAddr)), - Stop, - } - - async fn receive(socket: Arc, buffer: &mut [u8]) -> ZResult { + async fn receive(socket: Arc, buffer: &mut [u8]) -> ZResult<(usize, SocketAddr)> { let res = socket.recv_from(buffer).await.map_err(|e| zerror!(e))?; - Ok(Action::Receive(res)) - } - - async fn stop(signal: Signal) -> ZResult { - signal.wait().await; - Ok(Action::Stop) + Ok(res) } let src_addr = socket.local_addr().map_err(|e| { @@ -511,65 +493,66 @@ async fn accept_read_task( })?; log::trace!("Ready to accept UDP connections on: {:?}", src_addr); - // Buffers for deserialization - while active.load(Ordering::Acquire) { + + loop { + // Buffers for deserialization let mut buff = zenoh_buffers::vec::uninit(UDP_MAX_MTU as usize); - // Wait for incoming connections - let (n, dst_addr) = match receive(socket.clone(), &mut buff) - 
.race(stop(signal.clone())) - .await - { - Ok(action) => match action { - Action::Receive((n, addr)) => (n, addr), - Action::Stop => break, - }, - Err(e) => { - log::warn!("{}. Hint: increase the system open file limit.", e); - // Throttle the accept loop upon an error - // NOTE: This might be due to various factors. However, the most common case is that - // the process has reached the maximum number of open files in the system. On - // Linux systems this limit can be changed by using the "ulimit" command line - // tool. In case of systemd-based systems, this can be changed by using the - // "sysctl" command line tool. - task::sleep(Duration::from_micros(*UDP_ACCEPT_THROTTLE_TIME)).await; - continue; - } - }; - let link = loop { - let res = zgetlink!(src_addr, dst_addr); - match res { - Some(link) => break link.upgrade(), - None => { - // A new peers has sent data to this socket - log::debug!("Accepted UDP connection on {}: {}", src_addr, dst_addr); - let unconnected = Arc::new(LinkUnicastUdpUnconnected { - socket: Arc::downgrade(&socket), - links: links.clone(), - input: Mvar::new(), - leftover: AsyncMutex::new(None), - }); - zaddlink!(src_addr, dst_addr, Arc::downgrade(&unconnected)); - // Create the new link object - let link = Arc::new(LinkUnicastUdp::new( - src_addr, - dst_addr, - LinkUnicastUdpVariant::Unconnected(unconnected), - )); - // Add the new link to the set of connected peers - if let Err(e) = manager.send_async(LinkUnicast(link)).await { - log::error!("{}-{}: {}", file!(), line!(), e) + tokio::select! 
{ + _ = token.cancelled() => break, + + res = receive(socket.clone(), &mut buff) => { + match res { + Ok((n, dst_addr)) => { + let link = loop { + let res = zgetlink!(src_addr, dst_addr); + match res { + Some(link) => break link.upgrade(), + None => { + // A new peers has sent data to this socket + log::debug!("Accepted UDP connection on {}: {}", src_addr, dst_addr); + let unconnected = Arc::new(LinkUnicastUdpUnconnected { + socket: Arc::downgrade(&socket), + links: links.clone(), + input: Mvar::new(), + leftover: AsyncMutex::new(None), + }); + zaddlink!(src_addr, dst_addr, Arc::downgrade(&unconnected)); + // Create the new link object + let link = Arc::new(LinkUnicastUdp::new( + src_addr, + dst_addr, + LinkUnicastUdpVariant::Unconnected(unconnected), + )); + // Add the new link to the set of connected peers + if let Err(e) = manager.send_async(LinkUnicast(link)).await { + log::error!("{}-{}: {}", file!(), line!(), e) + } + } + } + }; + + match link { + Some(link) => { + link.received(buff, n).await; + } + None => { + zdellink!(src_addr, dst_addr); + } + } } - } - } - }; - match link { - Some(link) => { - link.received(buff, n).await; - } - None => { - zdellink!(src_addr, dst_addr); + Err(e) => { + log::warn!("{}. Hint: increase the system open file limit.", e); + // Throttle the accept loop upon an error + // NOTE: This might be due to various factors. However, the most common case is that + // the process has reached the maximum number of open files in the system. On + // Linux systems this limit can be changed by using the "ulimit" command line + // tool. In case of systemd-based systems, this can be changed by using the + // "sysctl" command line tool. 
+ tokio::time::sleep(Duration::from_micros(*UDP_ACCEPT_THROTTLE_TIME)).await; + } + } } } } diff --git a/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml b/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml index 2801dc3a22..66784728f9 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml +++ b/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml @@ -28,9 +28,7 @@ description = "Internal crate for zenoh." transport_unixpipe = [] [dependencies] -async-std = { workspace = true } async-trait = { workspace = true } -async-io = { workspace = true } log = { workspace = true } rand = { workspace = true, features = ["default"] } zenoh-buffers = { workspace = true } @@ -39,6 +37,9 @@ zenoh-config = { workspace = true } zenoh-link-commons = { workspace = true } zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } +zenoh-runtime = { workspace = true } +tokio = { workspace = true, features = ["sync", "fs", "io-util", "macros"] } +tokio-util = { workspace = true, features = ["rt"] } [target.'cfg(unix)'.dependencies] unix-named-pipe = "0.2.0" diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs index 8793add470..bcafaaba3c 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs @@ -44,9 +44,9 @@ impl LocatorInspector for UnixPipeLocatorInspector { #[derive(Default, Clone, Copy, Debug)] pub struct UnixPipeConfigurator; -#[async_trait] + impl ConfigurationInspector for UnixPipeConfigurator { - async fn inspect_config(&self, config: &Config) -> ZResult { + fn inspect_config(&self, config: &Config) -> ZResult { let mut properties: Vec<(&str, &str)> = vec![]; let c = config.transport().link().unixpipe(); diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index 83f6414dee..8f3577a8e9 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ 
b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -14,9 +14,6 @@ use crate::config; #[cfg(not(target_os = "macos"))] use advisory_lock::{AdvisoryFileLock, FileLockMode}; -use async_io::Async; -use async_std::fs::remove_file; -use async_std::task::JoinHandle; use async_trait::async_trait; use filepath::FilePath; use nix::libc; @@ -26,11 +23,17 @@ use std::cell::UnsafeCell; use std::collections::HashMap; use std::fmt; use std::fs::{File, OpenOptions}; +use std::io::ErrorKind; use std::io::{Read, Write}; use std::os::unix::fs::OpenOptionsExt; use std::sync::Arc; +use tokio::fs::remove_file; +use tokio::io::unix::AsyncFd; +use tokio::io::Interest; +use tokio_util::sync::CancellationToken; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_runtime::ZRuntime; use unix_named_pipe::{create, open_write}; @@ -90,12 +93,12 @@ impl Invitation { } struct PipeR { - pipe: Async, + pipe: AsyncFd, } impl Drop for PipeR { fn drop(&mut self) { - if let Ok(path) = self.pipe.as_mut().path() { + if let Ok(path) = self.pipe.get_ref().path() { let _ = unlink(&path); } } @@ -105,38 +108,38 @@ impl PipeR { // create, open and lock named pipe let pipe_file = Self::create_and_open_unique_pipe_for_read(path, access_mode).await?; // create async_io wrapper for pipe's file descriptor - let pipe = Async::new(pipe_file)?; + let pipe = AsyncFd::new(pipe_file)?; Ok(Self { pipe }) } async fn read<'a>(&'a mut self, buf: &'a mut [u8]) -> ZResult { let result = self .pipe - .read_with_mut(|pipe| match pipe.read(&mut buf[..]) { - Ok(0) => Err(async_std::io::ErrorKind::WouldBlock.into()), + .async_io_mut(Interest::READABLE, |pipe| match pipe.read(&mut buf[..]) { + Ok(0) => Err(ErrorKind::WouldBlock.into()), Ok(val) => Ok(val), Err(e) => Err(e), }) .await?; - ZResult::Ok(result) + Ok(result) } async fn read_exact<'a>(&'a mut self, buf: &'a mut [u8]) -> ZResult<()> { let mut r: usize = 0; self.pipe - .read_with_mut(|pipe| match pipe.read(&mut 
buf[r..]) { - Ok(0) => Err(async_std::io::ErrorKind::WouldBlock.into()), + .async_io_mut(Interest::READABLE, |pipe| match pipe.read(&mut buf[r..]) { + Ok(0) => Err(ErrorKind::WouldBlock.into()), Ok(val) => { r += val; if r == buf.len() { return Ok(()); } - Err(async_std::io::ErrorKind::WouldBlock.into()) + Err(ErrorKind::WouldBlock.into()) } Err(e) => Err(e), }) .await?; - ZResult::Ok(()) + Ok(()) } async fn create_and_open_unique_pipe_for_read(path_r: &str, access_mode: u32) -> ZResult { @@ -176,45 +179,45 @@ impl PipeR { } struct PipeW { - pipe: Async, + pipe: AsyncFd, } impl PipeW { async fn new(path: &str) -> ZResult { // create, open and lock named pipe let pipe_file = Self::open_unique_pipe_for_write(path)?; // create async_io wrapper for pipe's file descriptor - let pipe = Async::new(pipe_file)?; + let pipe = AsyncFd::new(pipe_file)?; Ok(Self { pipe }) } async fn write<'a>(&'a mut self, buf: &'a [u8]) -> ZResult { let result = self .pipe - .write_with_mut(|pipe| match pipe.write(buf) { - Ok(0) => Err(async_std::io::ErrorKind::WouldBlock.into()), + .async_io_mut(Interest::WRITABLE, |pipe| match pipe.write(buf) { + Ok(0) => Err(ErrorKind::WouldBlock.into()), Ok(val) => Ok(val), Err(e) => Err(e), }) .await?; - ZResult::Ok(result) + Ok(result) } async fn write_all<'a>(&'a mut self, buf: &'a [u8]) -> ZResult<()> { let mut r: usize = 0; self.pipe - .write_with_mut(|pipe| match pipe.write(&buf[r..]) { - Ok(0) => Err(async_std::io::ErrorKind::WouldBlock.into()), + .async_io_mut(Interest::WRITABLE, |pipe| match pipe.write(&buf[r..]) { + Ok(0) => Err(ErrorKind::WouldBlock.into()), Ok(val) => { r += val; if r == buf.len() { return Ok(()); } - Err(async_std::io::ErrorKind::WouldBlock.into()) + Err(ErrorKind::WouldBlock.into()) } Err(e) => Err(e), }) .await?; - ZResult::Ok(()) + Ok(()) } fn open_unique_pipe_for_write(path: &str) -> ZResult { @@ -280,8 +283,8 @@ async fn handle_incoming_connections( } struct UnicastPipeListener { - listening_task_handle: JoinHandle>, 
uplink_locator: Locator, + token: CancellationToken, } impl UnicastPipeListener { async fn listen(endpoint: EndPoint, manager: Arc) -> ZResult { @@ -295,29 +298,38 @@ impl UnicastPipeListener { // create request channel let mut request_channel = PipeR::new(&path_uplink, access_mode).await?; + let token = CancellationToken::new(); + let c_token = token.clone(); + + // WARN: The spawn_blocking is mandatory verified by the ping/pong test // create listening task - let listening_task_handle = async_std::task::spawn(async move { - loop { - let _ = handle_incoming_connections( - &endpoint, - &manager, - &mut request_channel, - &path_downlink, - &path_uplink, - access_mode, - ) - .await; - } + tokio::task::spawn_blocking(move || { + ZRuntime::Acceptor.block_on(async move { + loop { + tokio::select! { + _ = handle_incoming_connections( + &endpoint, + &manager, + &mut request_channel, + &path_downlink, + &path_uplink, + access_mode, + ) => {} + + _ = c_token.cancelled() => break + } + } + }) }); Ok(Self { - listening_task_handle, uplink_locator: local, + token, }) } - async fn stop_listening(self) { - self.listening_task_handle.cancel().await; + fn stop_listening(self) { + self.token.cancel(); } } @@ -528,14 +540,14 @@ impl fmt::Debug for UnicastPipe { pub struct LinkManagerUnicastPipe { manager: Arc, - listeners: async_std::sync::RwLock>, + listeners: tokio::sync::RwLock>, } impl LinkManagerUnicastPipe { pub fn new(manager: NewLinkChannelSender) -> Self { Self { manager: Arc::new(manager), - listeners: async_std::sync::RwLock::new(HashMap::new()), + listeners: tokio::sync::RwLock::new(HashMap::new()), } } } @@ -563,22 +575,19 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastPipe { let removed = zasyncwrite!(self.listeners).remove(endpoint); match removed { Some(val) => { - val.stop_listening().await; + val.stop_listening(); Ok(()) } None => bail!("No listener found for endpoint {}", endpoint), } } - fn get_listeners(&self) -> Vec { - async_std::task::block_on(async { 
zasyncread!(self.listeners) }) - .keys() - .cloned() - .collect() + async fn get_listeners(&self) -> Vec { + zasyncread!(self.listeners).keys().cloned().collect() } - fn get_locators(&self) -> Vec { - async_std::task::block_on(async { zasyncread!(self.listeners) }) + async fn get_locators(&self) -> Vec { + zasyncread!(self.listeners) .values() .map(|v| v.uplink_locator.clone()) .collect() diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/Cargo.toml b/io/zenoh-links/zenoh-link-unixsock_stream/Cargo.toml index b10d0a154f..1e2bba789c 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/Cargo.toml +++ b/io/zenoh-links/zenoh-link-unixsock_stream/Cargo.toml @@ -18,12 +18,12 @@ version = { workspace = true } repository = { workspace = true } homepage = { workspace = true } authors = [ - "kydos ", - "Julien Enoch ", - "Olivier Hécart ", - "Luca Cominardi ", - "Pierre Avital ", - "Gabriele Baldoni " + "kydos ", + "Julien Enoch ", + "Olivier Hécart ", + "Luca Cominardi ", + "Pierre Avital ", + "Gabriele Baldoni " ] edition = { workspace = true } license = { workspace = true } @@ -32,14 +32,16 @@ description = "Internal crate for zenoh." 
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -async-std = { workspace = true } async-trait = { workspace = true } futures = { workspace = true } log = { workspace = true } nix = { workspace = true } +tokio = { workspace = true, features = ["io-std", "macros", "net", "rt-multi-thread", "time"] } +tokio-util = { workspace = true, features = ["rt"] } uuid = { workspace = true, features = ["default"] } zenoh-core = { workspace = true } zenoh-link-commons = { workspace = true } zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } +zenoh-runtime = { workspace = true } zenoh-sync = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs index 3ac1bcbfe6..53441ab89c 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs @@ -12,50 +12,54 @@ // ZettaScale Zenoh Team, // use super::UNIXSOCKSTREAM_ACCEPT_THROTTLE_TIME; -use async_std::os::unix::net::{UnixListener, UnixStream}; -use async_std::path::PathBuf; -use async_std::prelude::FutureExt; -use async_std::task; -use async_std::task::JoinHandle; use async_trait::async_trait; -use futures::io::AsyncReadExt; -use futures::io::AsyncWriteExt; +use std::cell::UnsafeCell; use std::collections::HashMap; use std::fmt; use std::fs::remove_file; -use std::net::Shutdown; use std::os::unix::io::RawFd; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, RwLock}; +use std::path::PathBuf; +use std::sync::Arc; use std::time::Duration; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tokio::net::{UnixListener, UnixStream}; +use tokio::sync::RwLock as AsyncRwLock; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use uuid::Uuid; -use zenoh_core::{zread, zwrite}; +use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link_commons::{ 
LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{zerror, ZResult}; -use zenoh_sync::Signal; use super::{get_unix_path_as_string, UNIXSOCKSTREAM_DEFAULT_MTU, UNIXSOCKSTREAM_LOCATOR_PREFIX}; pub struct LinkUnicastUnixSocketStream { - // The underlying socket as returned from the async-std library - socket: UnixStream, + // The underlying socket as returned from the tokio library + socket: UnsafeCell, // The Unix domain socket source path src_locator: Locator, // The Unix domain socker destination path (random UUIDv4) dst_locator: Locator, } +unsafe impl Sync for LinkUnicastUnixSocketStream {} + impl LinkUnicastUnixSocketStream { fn new(socket: UnixStream, src_path: &str, dst_path: &str) -> LinkUnicastUnixSocketStream { LinkUnicastUnixSocketStream { - socket, + socket: UnsafeCell::new(socket), src_locator: Locator::new(UNIXSOCKSTREAM_LOCATOR_PREFIX, src_path, "").unwrap(), dst_locator: Locator::new(UNIXSOCKSTREAM_LOCATOR_PREFIX, dst_path, "").unwrap(), } } + + #[allow(clippy::mut_from_ref)] + fn get_mut_socket(&self) -> &mut UnixStream { + unsafe { &mut *self.socket.get() } + } } #[async_trait] @@ -63,13 +67,13 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { async fn close(&self) -> ZResult<()> { log::trace!("Closing UnixSocketStream link: {}", self); // Close the underlying UnixSocketStream socket - let res = self.socket.shutdown(Shutdown::Both); + let res = self.get_mut_socket().shutdown().await; log::trace!("UnixSocketStream link shutdown {}: {:?}", self, res); res.map_err(|e| zerror!(e).into()) } async fn write(&self, buffer: &[u8]) -> ZResult { - (&self.socket).write(buffer).await.map_err(|e| { + self.get_mut_socket().write(buffer).await.map_err(|e| { let e = zerror!("Write error on UnixSocketStream link {}: {}", self, e); log::trace!("{}", e); e.into() @@ -77,7 +81,7 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { } async fn write_all(&self, 
buffer: &[u8]) -> ZResult<()> { - (&self.socket).write_all(buffer).await.map_err(|e| { + self.get_mut_socket().write_all(buffer).await.map_err(|e| { let e = zerror!("Write error on UnixSocketStream link {}: {}", self, e); log::trace!("{}", e); e.into() @@ -85,7 +89,7 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { } async fn read(&self, buffer: &mut [u8]) -> ZResult { - (&self.socket).read(buffer).await.map_err(|e| { + self.get_mut_socket().read(buffer).await.map_err(|e| { let e = zerror!("Read error on UnixSocketStream link {}: {}", self, e); log::trace!("{}", e); e.into() @@ -93,11 +97,15 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { } async fn read_exact(&self, buffer: &mut [u8]) -> ZResult<()> { - (&self.socket).read_exact(buffer).await.map_err(|e| { - let e = zerror!("Read error on UnixSocketStream link {}: {}", self, e); - log::trace!("{}", e); - e.into() - }) + self.get_mut_socket() + .read_exact(buffer) + .await + .map(|_len| ()) + .map_err(|e| { + let e = zerror!("Read error on UnixSocketStream link {}: {}", self, e); + log::trace!("{}", e); + e.into() + }) } #[inline(always)] @@ -136,7 +144,8 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { impl Drop for LinkUnicastUnixSocketStream { fn drop(&mut self) { // Close the underlying UnixSocketStream socket - let _ = self.socket.shutdown(Shutdown::Both); + let _ = zenoh_runtime::ZRuntime::TX + .block_in_place(async move { self.get_mut_socket().shutdown().await }); } } @@ -161,8 +170,7 @@ impl fmt::Debug for LinkUnicastUnixSocketStream { /*************************************/ struct ListenerUnixSocketStream { endpoint: EndPoint, - active: Arc, - signal: Signal, + token: CancellationToken, handle: JoinHandle>, lock_fd: RawFd, } @@ -170,31 +178,33 @@ struct ListenerUnixSocketStream { impl ListenerUnixSocketStream { fn new( endpoint: EndPoint, - active: Arc, - signal: Signal, + token: CancellationToken, handle: JoinHandle>, lock_fd: RawFd, ) -> ListenerUnixSocketStream { 
ListenerUnixSocketStream { endpoint, - active, - signal, + token, handle, lock_fd, } } + + async fn stop(&self) { + self.token.cancel(); + } } pub struct LinkManagerUnicastUnixSocketStream { manager: NewLinkChannelSender, - listeners: Arc>>, + listeners: Arc>>, } impl LinkManagerUnicastUnixSocketStream { pub fn new(manager: NewLinkChannelSender) -> Self { Self { manager, - listeners: Arc::new(RwLock::new(HashMap::new())), + listeners: Arc::new(AsyncRwLock::new(HashMap::new())), } } } @@ -326,7 +336,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { let _ = remove_file(path.clone()); // Bind the Unix socket - let socket = UnixListener::bind(&path).await.map_err(|e| { + let socket = UnixListener::bind(&path).map_err(|e| { let e = zerror!( "Can not create a new UnixSocketStream listener on {}: {}", path, @@ -367,24 +377,24 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { )?; // Spawn the accept loop for the listener - let active = Arc::new(AtomicBool::new(true)); - let signal = Signal::new(); - let mut listeners = zwrite!(self.listeners); + let token = CancellationToken::new(); + let c_token = token.clone(); + let mut listeners = zasyncwrite!(self.listeners); - let c_active = active.clone(); - let c_signal = signal.clone(); let c_manager = self.manager.clone(); let c_listeners = self.listeners.clone(); let c_path = local_path_str.to_owned(); - let handle = task::spawn(async move { + + let task = async move { // Wait for the accept loop to terminate - let res = accept_task(socket, c_active, c_signal, c_manager).await; - zwrite!(c_listeners).remove(&c_path); + let res = accept_task(socket, c_token, c_manager).await; + zasyncwrite!(c_listeners).remove(&c_path); res - }); + }; + let handle = zenoh_runtime::ZRuntime::Acceptor.spawn(task); let locator = endpoint.to_locator(); - let listener = ListenerUnixSocketStream::new(endpoint, active, signal, handle, lock_fd); + let listener = ListenerUnixSocketStream::new(endpoint, 
token, handle, lock_fd); listeners.insert(local_path_str.to_owned(), listener); Ok(locator) @@ -394,7 +404,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { let path = get_unix_path_as_string(endpoint.address()); // Stop the listener - let listener = zwrite!(self.listeners).remove(&path).ok_or_else(|| { + let listener = zasyncwrite!(self.listeners).remove(&path).ok_or_else(|| { let e = zerror!( "Can not delete the UnixSocketStream listener because it has not been found: {}", path @@ -404,9 +414,8 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { })?; // Send the stop signal - listener.active.store(false, Ordering::Release); - listener.signal.trigger(); - let res = listener.handle.await; + listener.stop().await; + listener.handle.await??; //Release the lock let _ = nix::fcntl::flock(listener.lock_fd, nix::fcntl::FlockArg::UnlockNonblock); @@ -417,18 +426,19 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { let lock_file_path = format!("{path}.lock"); let tmp = remove_file(lock_file_path); log::trace!("UnixSocketStream Domain Socket removal result: {:?}", tmp); - res + + Ok(()) } - fn get_listeners(&self) -> Vec { - zread!(self.listeners) + async fn get_listeners(&self) -> Vec { + zasyncread!(self.listeners) .values() .map(|x| x.endpoint.clone()) .collect() } - fn get_locators(&self) -> Vec { - zread!(self.listeners) + async fn get_locators(&self) -> Vec { + zasyncread!(self.listeners) .values() .map(|x| x.endpoint.to_locator()) .collect() @@ -437,23 +447,12 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { async fn accept_task( socket: UnixListener, - active: Arc, - signal: Signal, + token: CancellationToken, manager: NewLinkChannelSender, ) -> ZResult<()> { - enum Action { - Accept(UnixStream), - Stop, - } - - async fn accept(socket: &UnixListener) -> ZResult { + async fn accept(socket: &UnixListener) -> ZResult { let (stream, _) = socket.accept().await.map_err(|e| 
zerror!(e))?; - Ok(Action::Accept(stream)) - } - - async fn stop(signal: Signal) -> ZResult { - signal.wait().await; - Ok(Action::Stop) + Ok(stream) } let src_addr = socket.local_addr().map_err(|e| { @@ -485,38 +484,41 @@ async fn accept_task( "Ready to accept UnixSocketStream connections on: {}", src_path ); - while active.load(Ordering::Acquire) { - // Wait for incoming connections - let stream = match accept(&socket).race(stop(signal.clone())).await { - Ok(action) => match action { - Action::Accept(stream) => stream, - Action::Stop => break, - }, - Err(e) => { - log::warn!("{}. Hint: increase the system open file limit.", e); - // Throttle the accept loop upon an error - // NOTE: This might be due to various factors. However, the most common case is that - // the process has reached the maximum number of open files in the system. On - // Linux systems this limit can be changed by using the "ulimit" command line - // tool. In case of systemd-based systems, this can be changed by using the - // "sysctl" command line tool. - task::sleep(Duration::from_micros(*UNIXSOCKSTREAM_ACCEPT_THROTTLE_TIME)).await; - continue; - } - }; - let dst_path = format!("{}", Uuid::new_v4()); - - log::debug!("Accepted UnixSocketStream connection on: {:?}", src_addr,); - - // Create the new link object - let link = Arc::new(LinkUnicastUnixSocketStream::new( - stream, src_path, &dst_path, - )); - - // Communicate the new link to the initial transport manager - if let Err(e) = manager.send_async(LinkUnicast(link)).await { - log::error!("{}-{}: {}", file!(), line!(), e) + loop { + tokio::select! 
{ + _ = token.cancelled() => break, + + res = accept(&socket) => { + match res { + Ok(stream) => { + let dst_path = format!("{}", Uuid::new_v4()); + + log::debug!("Accepted UnixSocketStream connection on: {:?}", src_addr,); + + // Create the new link object + let link = Arc::new(LinkUnicastUnixSocketStream::new( + stream, src_path, &dst_path, + )); + + // Communicate the new link to the initial transport manager + if let Err(e) = manager.send_async(LinkUnicast(link)).await { + log::error!("{}-{}: {}", file!(), line!(), e) + } + + } + Err(e) => { + log::warn!("{}. Hint: increase the system open file limit.", e); + // Throttle the accept loop upon an error + // NOTE: This might be due to various factors. However, the most common case is that + // the process has reached the maximum number of open files in the system. On + // Linux systems this limit can be changed by using the "ulimit" command line + // tool. In case of systemd-based systems, this can be changed by using the + // "sysctl" command line tool. + tokio::time::sleep(Duration::from_micros(*UNIXSOCKSTREAM_ACCEPT_THROTTLE_TIME)).await; + } + } + } } } diff --git a/io/zenoh-links/zenoh-link-ws/Cargo.toml b/io/zenoh-links/zenoh-link-ws/Cargo.toml index 0015ba09aa..0a1027b9bd 100644 --- a/io/zenoh-links/zenoh-link-ws/Cargo.toml +++ b/io/zenoh-links/zenoh-link-ws/Cargo.toml @@ -18,12 +18,12 @@ version = { workspace = true } repository = { workspace = true } homepage = { workspace = true } authors = [ - "kydos ", - "Julien Enoch ", - "Olivier Hécart ", - "Luca Cominardi ", - "Pierre Avital ", - "Gabriele Baldoni " + "kydos ", + "Julien Enoch ", + "Olivier Hécart ", + "Luca Cominardi ", + "Pierre Avital ", + "Gabriele Baldoni " ] edition = { workspace = true } license = { workspace = true } @@ -32,11 +32,11 @@ description = "Internal crate for zenoh." 
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -async-std = { workspace = true, features = ["unstable", "tokio1"] } async-trait = { workspace = true } futures-util = { workspace = true, features = ["sink", "std"] } log = { workspace = true } tokio = { workspace = true, features = ["io-std", "macros", "net", "rt-multi-thread", "time"] } +tokio-util = { workspace = true, features = ["rt"] } tokio-tungstenite = { workspace = true } url = { workspace = true } zenoh-core = { workspace = true } @@ -45,3 +45,4 @@ zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } zenoh-sync = { workspace = true } zenoh-util = { workspace = true } +zenoh-runtime = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-ws/src/lib.rs b/io/zenoh-links/zenoh-link-ws/src/lib.rs index b013671af9..f68a20d15d 100644 --- a/io/zenoh-links/zenoh-link-ws/src/lib.rs +++ b/io/zenoh-links/zenoh-link-ws/src/lib.rs @@ -17,7 +17,6 @@ //! This crate is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use async_std::net::ToSocketAddrs; use async_trait::async_trait; use std::net::SocketAddr; use url::Url; @@ -59,7 +58,7 @@ zconfigurable! 
{ } pub async fn get_ws_addr(address: Address<'_>) -> ZResult { - match address.as_str().to_socket_addrs().await?.next() { + match tokio::net::lookup_host(address.as_str()).await?.next() { Some(addr) => Ok(addr), None => bail!("Couldn't resolve WebSocket locator address: {}", address), } diff --git a/io/zenoh-links/zenoh-link-ws/src/unicast.rs b/io/zenoh-links/zenoh-link-ws/src/unicast.rs index 0ff1b1ab46..6a0cf64e6e 100644 --- a/io/zenoh-links/zenoh-link-ws/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-ws/src/unicast.rs @@ -12,10 +12,6 @@ // ZettaScale Zenoh Team, // -use async_std::prelude::*; -use async_std::sync::Mutex as AsyncMutex; -use async_std::task; -use async_std::task::JoinHandle; use async_trait::async_trait; use futures_util::stream::SplitSink; use futures_util::stream::SplitStream; @@ -24,20 +20,21 @@ use futures_util::StreamExt; use std::collections::HashMap; use std::fmt; use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, RwLock}; +use std::sync::Arc; use std::time::Duration; use tokio::net::{TcpListener, TcpStream}; +use tokio::sync::{Mutex as AsyncMutex, RwLock as AsyncRwLock}; +use tokio::task::JoinHandle; use tokio_tungstenite::accept_async; use tokio_tungstenite::tungstenite::Message; use tokio_tungstenite::{MaybeTlsStream, WebSocketStream}; -use zenoh_core::{zasynclock, zread, zwrite}; +use tokio_util::sync::CancellationToken; +use zenoh_core::{zasynclock, zasyncread, zasyncwrite}; use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{bail, zerror, ZResult}; -use zenoh_sync::Signal; use super::{get_ws_addr, get_ws_url, TCP_ACCEPT_THROTTLE_TIME, WS_DEFAULT_MTU, WS_LOCATOR_PREFIX}; @@ -227,7 +224,7 @@ impl LinkUnicastTrait for LinkUnicastWs { impl Drop for LinkUnicastWs { fn drop(&mut self) { - task::block_on(async { + 
zenoh_runtime::ZRuntime::TX.block_in_place(async { let mut guard = zasynclock!(self.send); // Close the underlying TCP socket guard.close().await.unwrap_or_else(|e| { @@ -258,37 +255,38 @@ impl fmt::Debug for LinkUnicastWs { /*************************************/ struct ListenerUnicastWs { endpoint: EndPoint, - active: Arc, - signal: Signal, + token: CancellationToken, handle: JoinHandle>, } impl ListenerUnicastWs { fn new( endpoint: EndPoint, - active: Arc, - signal: Signal, + token: CancellationToken, handle: JoinHandle>, ) -> ListenerUnicastWs { ListenerUnicastWs { endpoint, - active, - signal, + token, handle, } } + + async fn stop(&self) { + self.token.cancel(); + } } pub struct LinkManagerUnicastWs { manager: NewLinkChannelSender, - listeners: Arc>>, + listeners: Arc>>, } impl LinkManagerUnicastWs { pub fn new(manager: NewLinkChannelSender) -> Self { Self { manager, - listeners: Arc::new(RwLock::new(HashMap::new())), + listeners: Arc::new(AsyncRwLock::new(HashMap::new())), } } } @@ -358,25 +356,24 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastWs { )?; // Spawn the accept loop for the listener - let active = Arc::new(AtomicBool::new(true)); - let signal = Signal::new(); - - let c_active = active.clone(); - let c_signal = signal.clone(); + let token = CancellationToken::new(); + let c_token = token.clone(); let c_manager = self.manager.clone(); let c_listeners = self.listeners.clone(); let c_addr = local_addr; - let handle = task::spawn(async move { + + let task = async move { // Wait for the accept loop to terminate - let res = accept_task(socket, c_active, c_signal, c_manager).await; - zwrite!(c_listeners).remove(&c_addr); + let res = accept_task(socket, c_token, c_manager).await; + zasyncwrite!(c_listeners).remove(&c_addr); res - }); + }; + let handle = zenoh_runtime::ZRuntime::Acceptor.spawn(task); let locator = endpoint.to_locator(); - let listener = ListenerUnicastWs::new(endpoint, active, signal, handle); + let listener = 
ListenerUnicastWs::new(endpoint, token, handle); // Update the list of active listeners on the manager - zwrite!(self.listeners).insert(local_addr, listener); + zasyncwrite!(self.listeners).insert(local_addr, listener); Ok(locator) } @@ -385,7 +382,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastWs { let addr = get_ws_addr(endpoint.address()).await?; // Stop the listener - let listener = zwrite!(self.listeners).remove(&addr).ok_or_else(|| { + let listener = zasyncwrite!(self.listeners).remove(&addr).ok_or_else(|| { let e = zerror!( "Can not delete the TCP (WebSocket) listener because it has not been found: {}", addr @@ -395,24 +392,23 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastWs { })?; // Send the stop signal - listener.active.store(false, Ordering::Release); - listener.signal.trigger(); - listener.handle.await + listener.stop().await; + listener.handle.await? } - fn get_listeners(&self) -> Vec { - zread!(self.listeners) + async fn get_listeners(&self) -> Vec { + zasyncread!(self.listeners) .values() .map(|l| l.endpoint.clone()) .collect() } - fn get_locators(&self) -> Vec { + async fn get_locators(&self) -> Vec { let mut locators = Vec::new(); let default_ipv4 = Ipv4Addr::UNSPECIFIED; let default_ipv6 = Ipv6Addr::UNSPECIFIED; - let guard = zread!(self.listeners); + let guard = zasyncread!(self.listeners); for (key, value) in guard.iter() { let listener_locator = value.endpoint.to_locator(); if key.ip() == default_ipv4 { @@ -461,23 +457,12 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastWs { async fn accept_task( socket: TcpListener, - active: Arc, - signal: Signal, + token: CancellationToken, manager: NewLinkChannelSender, ) -> ZResult<()> { - enum Action { - Accept((TcpStream, SocketAddr)), - Stop, - } - - async fn accept(socket: &TcpListener) -> ZResult { + async fn accept(socket: &TcpListener) -> ZResult<(TcpStream, SocketAddr)> { let res = socket.accept().await.map_err(|e| zerror!(e))?; - Ok(Action::Accept(res)) - } - - async fn 
stop(signal: Signal) -> ZResult { - signal.wait().await; - Ok(Action::Stop) + Ok(res) } let src_addr = socket.local_addr().map_err(|e| { @@ -490,24 +475,27 @@ async fn accept_task( "Ready to accept TCP (WebSocket) connections on: {:?}", src_addr ); - while active.load(Ordering::Acquire) { - // Wait for incoming connections - let (stream, dst_addr) = match accept(&socket).race(stop(signal.clone())).await { - Ok(action) => match action { - Action::Accept((stream, addr)) => (stream, addr), - Action::Stop => break, + + loop { + let (stream, dst_addr) = tokio::select! { + res = accept(&socket) => { + match res { + Ok(res) => res, + Err(e) => { + log::warn!("{}. Hint: increase the system open file limit.", e); + // Throttle the accept loop upon an error + // NOTE: This might be due to various factors. However, the most common case is that + // the process has reached the maximum number of open files in the system. On + // Linux systems this limit can be changed by using the "ulimit" command line + // tool. In case of systemd-based systems, this can be changed by using the + // "sysctl" command line tool. + tokio::time::sleep(Duration::from_micros(*TCP_ACCEPT_THROTTLE_TIME)).await; + continue; + } + } }, - Err(e) => { - log::warn!("{}. Hint: increase the system open file limit.", e); - // Throttle the accept loop upon an error - // NOTE: This might be due to various factors. However, the most common case is that - // the process has reached the maximum number of open files in the system. On - // Linux systems this limit can be changed by using the "ulimit" command line - // tool. In case of systemd-based systems, this can be changed by using the - // "sysctl" command line tool. 
- task::sleep(Duration::from_micros(*TCP_ACCEPT_THROTTLE_TIME)).await; - continue; - } + + _ = token.cancelled() => break, }; log::debug!( diff --git a/io/zenoh-transport/Cargo.toml b/io/zenoh-transport/Cargo.toml index 0d9d494606..4cb51fc504 100644 --- a/io/zenoh-transport/Cargo.toml +++ b/io/zenoh-transport/Cargo.toml @@ -49,10 +49,17 @@ unstable = [] default = ["test", "transport_multilink"] [dependencies] -async-executor = { workspace = true } -async-global-executor = { workspace = true } -async-std = { workspace = true } async-trait = { workspace = true } +tokio = { workspace = true, features = [ + "sync", + "fs", + "time", + "macros", + "rt-multi-thread", + "io-util", + "net", +] } +tokio-util = { workspace = true, features = ["rt"]} flume = { workspace = true } log = { workspace = true } lz4_flex = { workspace = true } @@ -74,7 +81,12 @@ zenoh-result = { workspace = true } zenoh-shm = { workspace = true, optional = true } zenoh-sync = { workspace = true } zenoh-util = { workspace = true } +zenoh-runtime = { workspace = true } + + [dev-dependencies] +futures-util = { workspace = true } env_logger = { workspace = true } zenoh-protocol = { workspace = true, features = ["test"] } +futures = { workspace = true } diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index d3a80af34b..6ef385276f 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -17,7 +17,6 @@ use super::{ batch::{Encode, WBatch}, priority::{TransportChannelTx, TransportPriorityTx}, }; -use async_std::prelude::FutureExt; use flume::{bounded, Receiver, Sender}; use ringbuffer_spsc::{RingBuffer, RingBufferReader, RingBufferWriter}; use std::sync::atomic::{AtomicBool, AtomicU16, Ordering}; @@ -668,11 +667,9 @@ impl TransmissionPipelineConsumer { } // Wait for the backoff to expire or for a new message - let _ = self - .n_out_r - .recv_async() - .timeout(Duration::from_nanos(bo as u64)) - .await; + let _ 
= + tokio::time::timeout(Duration::from_nanos(bo as u64), self.n_out_r.recv_async()) + .await; } None } @@ -709,7 +706,6 @@ impl TransmissionPipelineConsumer { #[cfg(test)] mod tests { use super::*; - use async_std::{prelude::FutureExt, task}; use std::{ convert::TryFrom, sync::{ @@ -718,6 +714,8 @@ mod tests { }, time::{Duration, Instant}, }; + use tokio::task; + use tokio::time::timeout; use zenoh_buffers::{ reader::{DidntRead, HasReader}, ZBuf, @@ -729,6 +727,7 @@ mod tests { transport::{BatchSize, Fragment, Frame, TransportBody, TransportSn}, zenoh::{PushBody, Put}, }; + use zenoh_result::ZResult; const SLEEP: Duration = Duration::from_millis(100); const TIMEOUT: Duration = Duration::from_secs(60); @@ -744,8 +743,8 @@ mod tests { backoff: Duration::from_micros(1), }; - #[test] - fn tx_pipeline_flow() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn tx_pipeline_flow() -> ZResult<()> { fn schedule(queue: TransmissionPipelineProducer, num_msg: usize, payload_size: usize) { // Send reliable messages let key = "test".into(); @@ -831,7 +830,7 @@ mod tests { } // Pipeline priorities - let tct = TransportPriorityTx::make(Bits::from(TransportSn::MAX)).unwrap(); + let tct = TransportPriorityTx::make(Bits::from(TransportSn::MAX))?; let priorities = vec![tct]; // Total amount of bytes to send in each test @@ -840,37 +839,37 @@ mod tests { // Payload size of the messages let payload_sizes = [8, 64, 512, 4_096, 8_192, 32_768, 262_144, 2_097_152]; - task::block_on(async { - for ps in payload_sizes.iter() { - if u64::try_from(*ps).is_err() { - break; - } + for ps in payload_sizes.iter() { + if u64::try_from(*ps).is_err() { + break; + } - // Compute the number of messages to send - let num_msg = max_msgs.min(bytes / ps); + // Compute the number of messages to send + let num_msg = max_msgs.min(bytes / ps); - let (producer, consumer) = TransmissionPipeline::make( - TransmissionPipelineConf::default(), - priorities.as_slice(), - ); + let (producer, 
consumer) = TransmissionPipeline::make( + TransmissionPipelineConf::default(), + priorities.as_slice(), + ); + + let t_c = task::spawn(async move { + consume(consumer, num_msg).await; + }); - let t_c = task::spawn(async move { - consume(consumer, num_msg).await; - }); + let c_ps = *ps; + let t_s = task::spawn_blocking(move || { + schedule(producer, num_msg, c_ps); + }); - let c_ps = *ps; - let t_s = task::spawn_blocking(move || { - schedule(producer, num_msg, c_ps); - }); + let res = tokio::time::timeout(TIMEOUT, futures::future::join_all([t_c, t_s])).await; + assert!(res.is_ok()); + } - let res = t_c.join(t_s).timeout(TIMEOUT).await; - assert!(res.is_ok()); - } - }); + Ok(()) } - #[test] - fn tx_pipeline_blocking() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn tx_pipeline_blocking() -> ZResult<()> { fn schedule(queue: TransmissionPipelineProducer, counter: Arc, id: usize) { // Make sure to put only one message per batch: set the payload size // to half of the batch in such a way the serialized zenoh message @@ -917,7 +916,7 @@ mod tests { } // Pipeline - let tct = TransportPriorityTx::make(Bits::from(TransportSn::MAX)).unwrap(); + let tct = TransportPriorityTx::make(Bits::from(TransportSn::MAX))?; let priorities = vec![tct]; let (producer, mut consumer) = TransmissionPipeline::make(TransmissionPipelineConf::default(), priorities.as_slice()); @@ -935,39 +934,41 @@ mod tests { schedule(producer, c_counter, 2); }); - task::block_on(async { - // Wait to have sent enough messages and to have blocked - println!( - "Pipeline Blocking [---]: waiting to have {} messages being scheduled", - CONFIG.queue_size[Priority::MAX as usize] - ); - let check = async { - while counter.load(Ordering::Acquire) < CONFIG.queue_size[Priority::MAX as usize] { - task::sleep(SLEEP).await; - } - }; - check.timeout(TIMEOUT).await.unwrap(); + // Wait to have sent enough messages and to have blocked + println!( + "Pipeline Blocking [---]: waiting to have {} messages 
being scheduled", + CONFIG.queue_size[Priority::MAX as usize] + ); + let check = async { + while counter.load(Ordering::Acquire) < CONFIG.queue_size[Priority::MAX as usize] { + tokio::time::sleep(SLEEP).await; + } + }; + + timeout(TIMEOUT, check).await?; - // Disable and drain the queue + // Disable and drain the queue + timeout( + TIMEOUT, task::spawn_blocking(move || { println!("Pipeline Blocking [---]: draining the queue"); let _ = consumer.drain(); - }) - .timeout(TIMEOUT) - .await - .unwrap(); - - // Make sure that the tasks scheduling have been unblocked - println!("Pipeline Blocking [---]: waiting for schedule (1) to be unblocked"); - h1.timeout(TIMEOUT).await.unwrap(); - println!("Pipeline Blocking [---]: waiting for schedule (2) to be unblocked"); - h2.timeout(TIMEOUT).await.unwrap(); - }); + }), + ) + .await??; + + // Make sure that the tasks scheduling have been unblocked + println!("Pipeline Blocking [---]: waiting for schedule (1) to be unblocked"); + timeout(TIMEOUT, h1).await??; + println!("Pipeline Blocking [---]: waiting for schedule (2) to be unblocked"); + timeout(TIMEOUT, h2).await??; + + Ok(()) } - #[test] + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] - fn tx_pipeline_thr() { + async fn tx_pipeline_thr() { // Queue let tct = TransportPriorityTx::make(Bits::from(TransportSn::MAX)).unwrap(); let priorities = vec![tct]; @@ -1029,18 +1030,16 @@ mod tests { } }); - task::block_on(async { - let mut prev_size: usize = usize::MAX; - loop { - let received = count.swap(0, Ordering::AcqRel); - let current: usize = size.load(Ordering::Acquire); - if current == prev_size { - let thr = (8.0 * received as f64) / 1_000_000_000.0; - println!("{} bytes: {:.6} Gbps", current, 2.0 * thr); - } - prev_size = current; - task::sleep(Duration::from_millis(500)).await; + let mut prev_size: usize = usize::MAX; + loop { + let received = count.swap(0, Ordering::AcqRel); + let current: usize = size.load(Ordering::Acquire); + if current == prev_size 
{ + let thr = (8.0 * received as f64) / 1_000_000_000.0; + println!("{} bytes: {:.6} Gbps", current, 2.0 * thr); } - }); + prev_size = current; + tokio::time::sleep(Duration::from_millis(500)).await; + } } } diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index 3c225274aa..9bf40e5fd3 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -19,11 +19,12 @@ use crate::multicast::manager::{ TransportManagerBuilderMulticast, TransportManagerConfigMulticast, TransportManagerStateMulticast, }; -use async_std::{sync::Mutex as AsyncMutex, task}; use rand::{RngCore, SeedableRng}; use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; +use tokio::sync::Mutex as AsyncMutex; +use tokio_util::sync::CancellationToken; use zenoh_config::{Config, LinkRxConf, QueueConf, QueueSizeConf}; use zenoh_crypto::{BlockCipher, PseudoRng}; use zenoh_link::NewLinkChannelSender; @@ -73,7 +74,7 @@ use zenoh_result::{bail, ZResult}; /// .lease(Duration::from_secs(1)) /// .keep_alive(4) // Send a KeepAlive every 250 ms /// .accept_timeout(Duration::from_secs(1)) -/// .accept_pending(10) // Set to 10 the number of simultanous pending incoming transports +/// .accept_pending(10) // Set to 10 the number of simultanous pending incoming transports /// .max_sessions(5); // Allow max 5 transports open /// let mut resolution = Resolution::default(); /// resolution.set(Field::FrameSN, Bits::U8); @@ -215,9 +216,7 @@ impl TransportManagerBuilder { self = self.tx_threads(*link.tx().threads()); self = self.protocols(link.protocols().clone()); - let (c, errors) = zenoh_link::LinkConfigurator::default() - .configurations(config) - .await; + let (c, errors) = zenoh_link::LinkConfigurator::default().configurations(config); if !errors.is_empty() { use std::fmt::Write; let mut formatter = String::from("Some protocols reported configuration errors:\r\n"); @@ -232,11 +231,7 @@ impl TransportManagerBuilder { .from_config(config) 
.await?, ); - self = self.multicast( - TransportManagerBuilderMulticast::default() - .from_config(config) - .await?, - ); + self = self.multicast(TransportManagerBuilderMulticast::default().from_config(config)?); Ok(self) } @@ -316,39 +311,6 @@ impl Default for TransportManagerBuilder { } } -#[derive(Clone)] -pub(crate) struct TransportExecutor { - executor: Arc>, - sender: async_std::channel::Sender<()>, -} - -impl TransportExecutor { - fn new(num_threads: usize) -> Self { - let (sender, receiver) = async_std::channel::bounded(1); - let executor = Arc::new(async_executor::Executor::new()); - for i in 0..num_threads { - let exec = executor.clone(); - let recv = receiver.clone(); - std::thread::Builder::new() - .name(format!("zenoh-tx-{}", i)) - .spawn(move || async_std::task::block_on(exec.run(recv.recv()))) - .unwrap(); - } - Self { executor, sender } - } - - async fn stop(&self) { - let _ = self.sender.send(()).await; - } - - pub(crate) fn spawn( - &self, - future: impl core::future::Future + Send + 'static, - ) -> async_executor::Task { - self.executor.spawn(future) - } -} - #[derive(Clone)] pub struct TransportManager { pub config: Arc, @@ -357,9 +319,9 @@ pub struct TransportManager { pub(crate) cipher: Arc, pub(crate) locator_inspector: zenoh_link::LocatorInspector, pub(crate) new_unicast_link_sender: NewLinkChannelSender, - pub(crate) tx_executor: TransportExecutor, #[cfg(feature = "stats")] pub(crate) stats: Arc, + pub(crate) token: CancellationToken, } impl TransportManager { @@ -372,7 +334,6 @@ impl TransportManager { // @TODO: this should be moved into the unicast module let (new_unicast_link_sender, new_unicast_link_receiver) = flume::unbounded(); - let tx_threads = params.config.tx_threads; let this = TransportManager { config: Arc::new(params.config), state: Arc::new(params.state), @@ -380,17 +341,31 @@ impl TransportManager { cipher: Arc::new(cipher), locator_inspector: Default::default(), new_unicast_link_sender, - tx_executor: 
TransportExecutor::new(tx_threads), #[cfg(feature = "stats")] stats: std::sync::Arc::new(crate::stats::TransportStats::default()), + token: CancellationToken::new(), }; // @TODO: this should be moved into the unicast module - async_std::task::spawn({ + zenoh_runtime::ZRuntime::Net.spawn({ let this = this.clone(); + let token = this.token.clone(); async move { - while let Ok(link) = new_unicast_link_receiver.recv_async().await { - this.handle_new_link_unicast(link).await; + // while let Ok(link) = new_unicast_link_receiver.recv_async().await { + // this.handle_new_link_unicast(link).await; + // } + loop { + tokio::select! { + res = new_unicast_link_receiver.recv_async() => { + if let Ok(link) = res { + this.handle_new_link_unicast(link).await; + } + } + + _ = token.cancelled() => { + break; + } + } } } }); @@ -413,7 +388,10 @@ impl TransportManager { pub async fn close(&self) { self.close_unicast().await; - self.tx_executor.stop().await; + // TODO: Check this + self.token.cancel(); + // WARN: depends on the auto-close of tokio runtime after dropped + // self.tx_executor.runtime.shutdown_background(); } /*************************************/ @@ -443,16 +421,17 @@ impl TransportManager { } } - pub fn get_listeners(&self) -> Vec { - let mut lsu = task::block_on(self.get_listeners_unicast()); - let mut lsm = task::block_on(self.get_listeners_multicast()); + pub async fn get_listeners(&self) -> Vec { + let mut lsu = self.get_listeners_unicast().await; + let mut lsm = self.get_listeners_multicast().await; lsu.append(&mut lsm); lsu } + // TODO(yuyuan): Can we make this async as above? 
pub fn get_locators(&self) -> Vec { - let mut lsu = task::block_on(self.get_locators_unicast()); - let mut lsm = task::block_on(self.get_locators_multicast()); + let mut lsu = zenoh_runtime::ZRuntime::TX.block_in_place(self.get_locators_unicast()); + let mut lsm = zenoh_runtime::ZRuntime::TX.block_in_place(self.get_locators_multicast()); lsu.append(&mut lsm); lsu } diff --git a/io/zenoh-transport/src/multicast/establishment.rs b/io/zenoh-transport/src/multicast/establishment.rs index cec09ebdf2..a0b7576f03 100644 --- a/io/zenoh-transport/src/multicast/establishment.rs +++ b/io/zenoh-transport/src/multicast/establishment.rs @@ -91,6 +91,7 @@ pub(crate) async fn open_link( w_guard.insert(locator.clone(), ti.clone()); drop(w_guard); + // TODO(yuyuan): resolve the structure entanglement below // Notify the transport event handler let transport: TransportMulticast = (&ti).into(); diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 21ed0b3fdf..3565f747a0 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -23,12 +23,6 @@ use crate::{ priority::TransportPriorityTx, }, multicast::transport::TransportMulticastInner, - TransportExecutor, -}; -use async_executor::Task; -use async_std::{ - prelude::FutureExt, - task::{self, JoinHandle}, }; use std::{ convert::TryInto, @@ -36,6 +30,7 @@ use std::{ sync::Arc, time::{Duration, Instant}, }; +use tokio::task::JoinHandle; use zenoh_buffers::{BBuf, ZSlice, ZSliceBuffer}; use zenoh_core::{zcondfeat, zlock}; use zenoh_link::{Link, LinkMulticast, Locator}; @@ -262,6 +257,7 @@ pub(super) struct TransportLinkMulticastConfigUniversal { pub(super) batch_size: BatchSize, } +// TODO(yuyuan): Introduce TaskTracker or JoinSet and retire handle_tx, handle_rx, and signal_rx. 
#[derive(Clone)] pub(super) struct TransportLinkMulticastUniversal { // The underlying link @@ -271,7 +267,7 @@ pub(super) struct TransportLinkMulticastUniversal { // The transport this link is associated to transport: TransportMulticastInner, // The signals to stop TX/RX tasks - handle_tx: Option>>, + handle_tx: Option>>, signal_rx: Signal, handle_rx: Option>>, } @@ -297,7 +293,6 @@ impl TransportLinkMulticastUniversal { &mut self, config: TransportLinkMulticastConfigUniversal, priority_tx: Arc<[TransportPriorityTx]>, - executor: &TransportExecutor, ) { let initial_sns: Vec = priority_tx .iter() @@ -333,22 +328,23 @@ impl TransportLinkMulticastUniversal { // Spawn the TX task let c_link = self.link.clone(); - let ctransport = self.transport.clone(); - let handle = executor.spawn(async move { + let c_transport = self.transport.clone(); + + let handle = zenoh_runtime::ZRuntime::TX.spawn(async move { let res = tx_task( consumer, c_link.tx(), config, initial_sns, #[cfg(feature = "stats")] - ctransport.stats.clone(), + c_transport.stats.clone(), ) .await; if let Err(e) = res { log::debug!("{}", e); // Spawn a task to avoid a deadlock waiting for this same task // to finish in the close() joining its handle - task::spawn(async move { ctransport.delete().await }); + zenoh_runtime::ZRuntime::Net.spawn(async move { c_transport.delete().await }); } }); self.handle_tx = Some(Arc::new(handle)); @@ -365,15 +361,15 @@ impl TransportLinkMulticastUniversal { if self.handle_rx.is_none() { // Spawn the RX task let c_link = self.link.clone(); - let ctransport = self.transport.clone(); + let c_transport = self.transport.clone(); let c_signal = self.signal_rx.clone(); let c_rx_buffer_size = self.transport.manager.config.link_rx_buffer_size; - let handle = task::spawn(async move { + let handle = zenoh_runtime::ZRuntime::RX.spawn(async move { // Start the consume task let res = rx_task( c_link.rx(), - ctransport.clone(), + c_transport.clone(), c_signal.clone(), c_rx_buffer_size, 
batch_size, @@ -384,7 +380,7 @@ impl TransportLinkMulticastUniversal { log::debug!("{}", e); // Spawn a task to avoid a deadlock waiting for this same task // to finish in the close() joining its handle - task::spawn(async move { ctransport.delete().await }); + zenoh_runtime::ZRuntime::Net.spawn(async move { c_transport.delete().await }); } }); self.handle_rx = Some(Arc::new(handle)); @@ -401,14 +397,14 @@ impl TransportLinkMulticastUniversal { if let Some(handle) = self.handle_rx.take() { // It is safe to unwrap the Arc since we have the ownership of the whole link let handle_rx = Arc::try_unwrap(handle).unwrap(); - handle_rx.await; + handle_rx.await?; } self.stop_tx(); if let Some(handle) = self.handle_tx.take() { // It is safe to unwrap the Arc since we have the ownership of the whole link let handle_tx = Arc::try_unwrap(handle).unwrap(); - handle_tx.await; + handle_tx.await?; } self.link.close(None).await @@ -425,54 +421,65 @@ async fn tx_task( mut last_sns: Vec, #[cfg(feature = "stats")] stats: Arc, ) -> ZResult<()> { - enum Action { - Pull((WBatch, usize)), - Join, - Stop, - } - - async fn pull(pipeline: &mut TransmissionPipelineConsumer) -> Action { - match pipeline.pull().await { - Some(sb) => Action::Pull(sb), - None => Action::Stop, - } - } - - async fn join(last_join: Instant, join_interval: Duration) -> Action { + async fn join(last_join: Instant, join_interval: Duration) { let now = Instant::now(); let target = last_join + join_interval; if now < target { let left = target - now; - task::sleep(left).await; + tokio::time::sleep(left).await; } - Action::Join } let mut last_join = Instant::now().checked_sub(config.join_interval).unwrap(); loop { - match pull(&mut pipeline) - .race(join(last_join, config.join_interval)) - .await - { - Action::Pull((mut batch, priority)) => { - // Send the buffer on the link - link.send_batch(&mut batch).await?; - // Keep track of next SNs - if let Some(sn) = batch.codec.latest_sn.reliable { - last_sns[priority].reliable = 
sn; - } - if let Some(sn) = batch.codec.latest_sn.best_effort { - last_sns[priority].best_effort = sn; - } - #[cfg(feature = "stats")] - { - stats.inc_tx_t_msgs(batch.stats.t_msgs); - stats.inc_tx_bytes(batch.len() as usize); + tokio::select! { + res = pipeline.pull() => { + match res { + Some((mut batch, priority)) => { + // Send the buffer on the link + link.send_batch(&mut batch).await?; + // Keep track of next SNs + if let Some(sn) = batch.codec.latest_sn.reliable { + last_sns[priority].reliable = sn; + } + if let Some(sn) = batch.codec.latest_sn.best_effort { + last_sns[priority].best_effort = sn; + } + #[cfg(feature = "stats")] + { + stats.inc_tx_t_msgs(batch.stats.t_msgs); + stats.inc_tx_bytes(batch.len() as usize); + } + // Reinsert the batch into the queue + pipeline.refill(batch, priority); + } + None => { + // Drain the transmission pipeline and write remaining bytes on the wire + let mut batches = pipeline.drain(); + for (mut b, _) in batches.drain(..) { + tokio::time::timeout(config.join_interval, link.send_batch(&mut b)) + .await + .map_err(|_| { + zerror!( + "{}: flush failed after {} ms", + link, + config.join_interval.as_millis() + ) + })??; + + #[cfg(feature = "stats")] + { + stats.inc_tx_t_msgs(b.stats.t_msgs); + stats.inc_tx_bytes(b.len() as usize); + } + } + break; + } + } - // Reinsert the batch into the queue - pipeline.refill(batch, priority); } - Action::Join => { + + _ = join(last_join, config.join_interval) => { let next_sns = last_sns .iter() .map(|c| PrioritySn { @@ -509,29 +516,7 @@ async fn tx_task( } last_join = Instant::now(); - } - Action::Stop => { - // Drain the transmission pipeline and write remaining bytes on the wire - let mut batches = pipeline.drain(); - for (mut b, _) in batches.drain(..) 
{ - link.send_batch(&mut b) - .timeout(config.join_interval) - .await - .map_err(|_| { - zerror!( - "{}: flush failed after {} ms", - link, - config.join_interval.as_millis() - ) - })??; - #[cfg(feature = "stats")] - { - stats.inc_tx_t_msgs(b.stats.t_msgs); - stats.inc_tx_bytes(b.len() as usize); - } - } - break; } } } @@ -546,15 +531,10 @@ async fn rx_task( rx_buffer_size: usize, batch_size: BatchSize, ) -> ZResult<()> { - enum Action { - Read((RBatch, Locator)), - Stop, - } - async fn read( link: &mut TransportLinkMulticastRx, pool: &RecyclingObjectPool, - ) -> ZResult + ) -> ZResult<(RBatch, Locator)> where T: ZSliceBuffer + 'static, F: Fn() -> T, @@ -563,12 +543,7 @@ async fn rx_task( let (rbatch, locator) = link .recv_batch(|| pool.try_take().unwrap_or_else(|| pool.alloc())) .await?; - Ok(Action::Read((rbatch, locator))) - } - - async fn stop(signal: Signal) -> ZResult { - signal.wait().await; - Ok(Action::Stop) + Ok((rbatch, locator)) } // The pool of buffers @@ -579,11 +554,12 @@ async fn rx_task( } let pool = RecyclingObjectPool::new(n, || vec![0_u8; mtu].into_boxed_slice()); - while !signal.is_triggered() { - // Async read from the underlying link - let action = read(&mut link, &pool).race(stop(signal.clone())).await?; - match action { - Action::Read((batch, locator)) => { + loop { + tokio::select! 
{ + _ = signal.wait() => break, + res = read(&mut link, &pool) => { + let (batch, locator) = res?; + #[cfg(feature = "stats")] transport.stats.inc_rx_bytes(batch.len()); @@ -596,7 +572,6 @@ async fn rx_task( &transport, )?; } - Action::Stop => break, } } Ok(()) diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index 7cda3d8eb3..b9b594205f 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -15,10 +15,10 @@ use crate::multicast::shm::SharedMemoryMulticast; use crate::multicast::{transport::TransportMulticastInner, TransportMulticast}; use crate::TransportManager; -use async_std::sync::Mutex; use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; +use tokio::sync::Mutex; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionMulticastConf; #[cfg(feature = "shared-memory")] @@ -107,10 +107,7 @@ impl TransportManagerBuilderMulticast { self } - pub async fn from_config( - mut self, - config: &Config, - ) -> ZResult { + pub fn from_config(mut self, config: &Config) -> ZResult { self = self.lease(Duration::from_millis( *config.transport().link().tx().lease(), )); @@ -173,7 +170,7 @@ impl Default for TransportManagerBuilderMulticast { #[cfg(feature = "transport_compression")] is_compression: *compression.enabled(), }; - async_std::task::block_on(tmb.from_config(&Config::default())).unwrap() + tmb.from_config(&Config::default()).unwrap() } } diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index 14f2fd619c..5cf714210f 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -269,7 +269,7 @@ impl TransportMulticastInner { let r_guard = zread!(self.peers); match r_guard.get(&locator) { Some(peer) => { - peer.active(); + peer.set_active(); match msg.body { TransportBody::Frame(msg) => self.handle_frame(msg, peer)?, 
TransportBody::Fragment(fragment) => { diff --git a/io/zenoh-transport/src/multicast/shm.rs b/io/zenoh-transport/src/multicast/shm.rs index 4e4c84d2a7..060198d927 100644 --- a/io/zenoh-transport/src/multicast/shm.rs +++ b/io/zenoh-transport/src/multicast/shm.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::sync::RwLock; use rand::{Rng, SeedableRng}; +use tokio::sync::RwLock; use zenoh_crypto::PseudoRng; use zenoh_result::ZResult; use zenoh_shm::{SharedMemoryManager, SharedMemoryReader}; diff --git a/io/zenoh-transport/src/multicast/transport.rs b/io/zenoh-transport/src/multicast/transport.rs index b8aa41b253..c647730390 100644 --- a/io/zenoh-transport/src/multicast/transport.rs +++ b/io/zenoh-transport/src/multicast/transport.rs @@ -21,7 +21,6 @@ use crate::{ }, TransportManager, TransportPeer, TransportPeerEventHandler, }; -use async_trait::async_trait; use std::{ collections::HashMap, sync::{ @@ -30,6 +29,7 @@ use std::{ }, time::Duration, }; +use tokio_util::sync::CancellationToken; use zenoh_core::{zcondfeat, zread, zwrite}; use zenoh_link::{Link, Locator}; use zenoh_protocol::core::Resolution; @@ -39,7 +39,7 @@ use zenoh_protocol::{ transport::{close, Join}, }; use zenoh_result::{bail, ZResult}; -use zenoh_util::{Timed, TimedEvent, TimedHandle, Timer}; +// use zenoh_util::{Timed, TimedEvent, TimedHandle, Timer}; /*************************************/ /* TRANSPORT */ @@ -52,15 +52,15 @@ pub(super) struct TransportMulticastPeer { pub(super) whatami: WhatAmI, pub(super) resolution: Resolution, pub(super) lease: Duration, - pub(super) whatchdog: Arc, - pub(super) handle: TimedHandle, + pub(super) is_active: Arc, + token: CancellationToken, pub(super) priority_rx: Box<[TransportPriorityRx]>, pub(super) handler: Arc, } impl TransportMulticastPeer { - pub(super) fn active(&self) { - self.whatchdog.store(true, Ordering::Release); + pub(super) fn set_active(&self) { + self.is_active.store(true, Ordering::Release); } pub(super) fn 
is_qos(&self) -> bool { @@ -68,25 +68,6 @@ impl TransportMulticastPeer { } } -#[derive(Clone)] -pub(super) struct TransportMulticastPeerLeaseTimer { - pub(super) whatchdog: Arc, - locator: Locator, - transport: TransportMulticastInner, -} - -#[async_trait] -impl Timed for TransportMulticastPeerLeaseTimer { - async fn run(&mut self) { - let is_active = self.whatchdog.swap(false, Ordering::AcqRel); - if !is_active { - let _ = self - .transport - .del_peer(&self.locator, close::reason::EXPIRED); - } - } -} - #[derive(Clone)] pub(crate) struct TransportMulticastInner { // The manager this channel is associated to @@ -101,8 +82,8 @@ pub(crate) struct TransportMulticastInner { pub(super) link: Arc>>, // The callback pub(super) callback: Arc>>>, - // The timer for peer leases - pub(super) timer: Arc, + // token for safe cancellation + token: CancellationToken, // Transport statistics #[cfg(feature = "stats")] pub(super) stats: Arc, @@ -134,7 +115,7 @@ impl TransportMulticastInner { locator: config.link.link.get_dst().to_owned(), link: Arc::new(RwLock::new(None)), callback: Arc::new(RwLock::new(None)), - timer: Arc::new(Timer::new(false)), + token: CancellationToken::new(), #[cfg(feature = "stats")] stats, }; @@ -202,6 +183,9 @@ impl TransportMulticastInner { cb.closed(); } + // TODO(yuyuan): use CancellationToken to unify the termination with the above + self.token.cancel(); + Ok(()) } @@ -257,7 +241,7 @@ impl TransportMulticastInner { sn_resolution: self.manager.config.resolution.get(Field::FrameSN), batch_size, }; - l.start_tx(config, self.priority_tx.clone(), &self.manager.tx_executor); + l.start_tx(config, self.priority_tx.clone()); Ok(()) } None => { @@ -382,15 +366,33 @@ impl TransportMulticastInner { ); // Create lease event - let whatchdog = Arc::new(AtomicBool::new(false)); - let event = TransportMulticastPeerLeaseTimer { - whatchdog: whatchdog.clone(), - locator: locator.clone(), - transport: self.clone(), + // TODO(yuyuan): refine the clone behaviors + let 
is_active = Arc::new(AtomicBool::new(false)); + let c_is_active = is_active.clone(); + let token = self.token.child_token(); + let c_token = token.clone(); + let c_self = self.clone(); + let c_locator = locator.clone(); + let task = async move { + let mut interval = + tokio::time::interval_at(tokio::time::Instant::now() + join.lease, join.lease); + loop { + tokio::select! { + _ = interval.tick() => { + if !c_is_active.swap(false, Ordering::AcqRel) { + break + } + } + _ = c_token.cancelled() => break + } + } + let _ = c_self.del_peer(&c_locator, close::reason::EXPIRED); }; - let event = TimedEvent::periodic(join.lease, event); - let handle = event.get_handle(); + // TODO(yuyuan): Put it into TaskTracker or store as JoinHandle + zenoh_runtime::ZRuntime::Acceptor.spawn(task); + + // TODO(yuyuan): Integrate the above async task into TransportMulticastPeer // Store the new peer let peer = TransportMulticastPeer { version: join.version, @@ -399,16 +401,13 @@ impl TransportMulticastInner { whatami: peer.whatami, resolution: join.resolution, lease: join.lease, - whatchdog, - handle, + is_active, + token, priority_rx, handler, }; zwrite!(self.peers).insert(locator.clone(), peer); - // Add the event to the timer - self.timer.add(event); - Ok(()) } @@ -423,8 +422,9 @@ impl TransportMulticastInner { self.locator, reason ); - peer.handle.clone().defuse(); + // TODO(yuyuan): Unify the termination + peer.token.cancel(); peer.handler.closing(); drop(guard); peer.handler.closed(); diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index 04a8f502c4..7b87f038e5 100644 --- a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::{sync::RwLock, task}; +use tokio::sync::RwLock; use zenoh_buffers::{reader::HasReader, writer::HasWriter, ZBuf, ZSlice, ZSliceKind}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_core::{zasyncread, zasyncwrite, zerror}; @@ -249,10 +249,14 
@@ pub fn map_zslice_to_shmbuf( let shmbinfo: SharedMemoryBufInfo = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; // First, try in read mode allowing concurrenct lookups - let r_guard = task::block_on(async { zasyncread!(shmr) }); + let r_guard = tokio::task::block_in_place(|| { + tokio::runtime::Handle::current().block_on(async { zasyncread!(shmr) }) + }); let smb = r_guard.try_read_shmbuf(&shmbinfo).or_else(|_| { drop(r_guard); - let mut w_guard = task::block_on(async { zasyncwrite!(shmr) }); + let mut w_guard = tokio::task::block_in_place(|| { + tokio::runtime::Handle::current().block_on(async { zasyncwrite!(shmr) }) + }); w_guard.read_shmbuf(&shmbinfo) })?; diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index 72e676f6ec..c1a1a8c16c 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -25,10 +25,10 @@ use crate::{ }, TransportManager, }; -use async_std::sync::Mutex; use async_trait::async_trait; use rand::Rng; use std::time::Duration; +use tokio::sync::Mutex; use zenoh_buffers::{reader::HasReader, writer::HasWriter, ZSlice}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_core::{zasynclock, zcondfeat, zerror}; diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs index 99a11ee3a9..beab85d18a 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs @@ -17,13 +17,13 @@ pub(crate) mod pubkey; pub(crate) mod usrpwd; use crate::unicast::establishment::{AcceptFsm, OpenFsm}; -use async_std::sync::{Mutex, RwLock}; use async_trait::async_trait; #[cfg(feature = "auth_pubkey")] pub use pubkey::*; use rand::{CryptoRng, Rng}; use std::convert::TryInto; use std::marker::PhantomData; +use tokio::sync::{Mutex, RwLock}; #[cfg(feature = 
"auth_usrpwd")] pub use usrpwd::*; use zenoh_buffers::reader::SiphonableReader; @@ -62,9 +62,7 @@ impl Auth { Ok(Self { #[cfg(feature = "auth_pubkey")] - pubkey: AuthPubKey::from_config(auth.pubkey()) - .await? - .map(RwLock::new), + pubkey: AuthPubKey::from_config(auth.pubkey())?.map(RwLock::new), #[cfg(feature = "auth_usrpwd")] usrpwd: AuthUsrPwd::from_config(auth.usrpwd()) .await? diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs index 25ecc0e24e..878b058f31 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // use crate::unicast::establishment::{ext::auth::id, AcceptFsm, OpenFsm}; -use async_std::sync::{Mutex, RwLock}; use async_trait::async_trait; use rand::Rng; use rsa::{ @@ -21,6 +20,7 @@ use rsa::{ BigUint, Pkcs1v15Encrypt, RsaPrivateKey, RsaPublicKey, }; use std::{collections::HashSet, fmt, ops::Deref, path::Path}; +use tokio::sync::{Mutex, RwLock}; use zenoh_buffers::{ reader::{DidntRead, HasReader, Reader}, writer::{DidntWrite, HasWriter, Writer}, @@ -76,7 +76,7 @@ impl AuthPubKey { Ok(()) } - pub async fn from_config(config: &PubKeyConf) -> ZResult> { + pub fn from_config(config: &PubKeyConf) -> ZResult> { const S: &str = "PubKey extension - From config."; // First, check if PEM keys are provided diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs index 5cbe122edd..f66a8fd53d 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs @@ -12,10 +12,10 @@ // ZettaScale Zenoh Team, // use crate::unicast::establishment::{ext::auth::id, AcceptFsm, OpenFsm}; -use async_std::{fs, sync::RwLock}; use async_trait::async_trait; use rand::{CryptoRng, Rng}; use 
std::{collections::HashMap, fmt}; +use tokio::sync::RwLock; use zenoh_buffers::{ reader::{DidntRead, HasReader, Reader}, writer::{DidntWrite, HasWriter, Writer}, @@ -68,7 +68,7 @@ impl AuthUsrPwd { let mut lookup: HashMap = HashMap::new(); if let Some(dict) = config.dictionary_file() { - let content = fs::read_to_string(dict) + let content = tokio::fs::read_to_string(dict) .await .map_err(|e| zerror!("{S} Invalid user-password dictionary file: {}.", e))?; @@ -448,10 +448,8 @@ impl<'a> AcceptFsm for &'a AuthUsrPwdFsm<'a> { } mod tests { - #[test] - fn authenticator_usrpwd_config() { - use zenoh_core::zasync_executor_init; - + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn authenticator_usrpwd_config() { async fn inner() { use super::AuthUsrPwd; use std::{fs::File, io::Write}; @@ -504,9 +502,6 @@ mod tests { let _ = std::fs::remove_file(f1); } - async_std::task::block_on(async { - zasync_executor_init!(); - inner().await; - }); + inner().await; } } diff --git a/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs b/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs index 9c3c584c70..f8e74779cf 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs @@ -15,10 +15,10 @@ use crate::unicast::establishment::{ ext::auth::pubkey::{self, AuthPubKey, AuthPubKeyFsm, ZPublicKey}, AcceptFsm, OpenFsm, }; -use async_std::sync::{Mutex, RwLock}; use async_trait::async_trait; use rand::{CryptoRng, Rng}; use rsa::{BigUint, RsaPrivateKey, RsaPublicKey}; +use tokio::sync::{Mutex, RwLock}; use zenoh_buffers::{ reader::{DidntRead, HasReader, Reader}, writer::{DidntWrite, Writer}, diff --git a/io/zenoh-transport/src/unicast/lowlatency/link.rs b/io/zenoh-transport/src/unicast/lowlatency/link.rs index 6a382f5960..43e4516aa5 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/link.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/link.rs @@ -14,28 +14,30 @@ 
use super::transport::TransportUnicastLowlatency; #[cfg(feature = "stats")] use crate::stats::TransportStats; +use crate::unicast::link::TransportLinkUnicast; use crate::unicast::link::TransportLinkUnicastRx; -use crate::{unicast::link::TransportLinkUnicast, TransportExecutor}; -use async_std::task; -use async_std::{prelude::FutureExt, sync::RwLock}; use std::sync::Arc; use std::time::Duration; +use tokio::sync::RwLock; +use tokio_util::sync::CancellationToken; use zenoh_buffers::{writer::HasWriter, ZSlice}; use zenoh_codec::*; use zenoh_core::{zasyncread, zasyncwrite}; -use zenoh_protocol::transport::{KeepAlive, TransportBodyLowLatency, TransportMessageLowLatency}; +use zenoh_link::LinkUnicast; +use zenoh_protocol::transport::TransportMessageLowLatency; +use zenoh_protocol::transport::{KeepAlive, TransportBodyLowLatency}; use zenoh_result::{zerror, ZResult}; -use zenoh_sync::RecyclingObjectPool; +use zenoh_runtime::ZRuntime; pub(crate) async fn send_with_link( - link: &TransportLinkUnicast, + link: &LinkUnicast, msg: TransportMessageLowLatency, #[cfg(feature = "stats")] stats: &Arc, ) -> ZResult<()> { let len; - if link.link.is_streamed() { + let codec = Zenoh080::new(); + if link.is_streamed() { let mut buffer = vec![0, 0, 0, 0]; - let codec = Zenoh080::new(); let mut writer = buffer.writer(); codec .write(&mut writer, &msg) @@ -46,10 +48,9 @@ pub(crate) async fn send_with_link( buffer[0..4].copy_from_slice(&le); - link.link.write_all(&buffer).await?; + link.write_all(&buffer).await?; } else { let mut buffer = vec![]; - let codec = Zenoh080::new(); let mut writer = buffer.writer(); codec .write(&mut writer, &msg) @@ -59,7 +60,7 @@ pub(crate) async fn send_with_link( { len = buffer.len() as u32; } - link.link.write_all(&buffer).await?; + link.write_all(&buffer).await?; } log::trace!("Sent: {:?}", msg); @@ -71,14 +72,35 @@ pub(crate) async fn send_with_link( Ok(()) } +pub(crate) async fn read_with_link( + link: &TransportLinkUnicastRx, + buffer: &mut [u8], + 
is_streamed: bool, +) -> ZResult { + if is_streamed { + // 16 bits for reading the batch length + let mut length = [0_u8; 4]; + link.link.read_exact(&mut length).await?; + let n = u32::from_le_bytes(length) as usize; + let len = buffer.len(); + let b = buffer.get_mut(0..n).ok_or_else(|| { + zerror!("Batch len is invalid. Received {n} but negotiated max len is {len}.") + })?; + link.link.read_exact(b).await?; + Ok(n) + } else { + link.link.read(buffer).await + } +} + impl TransportUnicastLowlatency { pub(super) fn send(&self, msg: TransportMessageLowLatency) -> ZResult<()> { - async_std::task::block_on(self.send_async(msg)) + zenoh_runtime::ZRuntime::TX.block_in_place(self.send_async(msg)) } pub(super) async fn send_async(&self, msg: TransportMessageLowLatency) -> ZResult<()> { let guard = zasyncwrite!(self.link); - let link = guard.as_ref().ok_or_else(|| zerror!("No link"))?; + let link = &guard.as_ref().ok_or_else(|| zerror!("No link"))?.link; send_with_link( link, msg, @@ -88,13 +110,14 @@ impl TransportUnicastLowlatency { .await } - pub(super) fn start_keepalive(&self, executor: &TransportExecutor, keep_alive: Duration) { - let mut guard = async_std::task::block_on(async { zasyncwrite!(self.handle_keepalive) }); + pub(super) fn start_keepalive(&self, keep_alive: Duration) { let c_transport = self.clone(); - let handle = executor.spawn(async move { + let token = self.token.child_token(); + let task = async move { let res = keepalive_task( c_transport.link.clone(), keep_alive, + token, #[cfg(feature = "stats")] c_transport.stats.clone(), ) @@ -112,34 +135,61 @@ impl TransportUnicastLowlatency { ); let _ = c_transport.finalize(0).await; } - }); - *guard = Some(handle); - } - - pub(super) async fn stop_keepalive(&self) { - let zid = self.manager.config.zid; - log::debug!("[{}] Stopping keepalive task...", zid,); - let mut guard = zasyncwrite!(self.handle_keepalive); - let handle = guard.take(); - drop(guard); - - if let Some(handle) = handle { - let _ = 
handle.cancel().await; - log::debug!("[{}] keepalive task stopped...", zid,); - } + }; + self.tracker.spawn_on(task, &ZRuntime::TX); } pub(super) fn internal_start_rx(&self, lease: Duration) { - let mut guard = async_std::task::block_on(async { zasyncwrite!(self.handle_rx) }); + let rx_buffer_size = self.manager.config.link_rx_buffer_size; + let token = self.token.child_token(); + let c_transport = self.clone(); - let handle = task::spawn(async move { + let task = async move { let guard = zasyncread!(c_transport.link); - let link = guard.as_ref().unwrap().rx(); + let link_rx = guard.as_ref().unwrap().rx(); drop(guard); - let rx_buffer_size = c_transport.manager.config.link_rx_buffer_size; - // Start the rx task - let res = rx_task(link, c_transport.clone(), lease, rx_buffer_size).await; + let is_streamed = link_rx.link.is_streamed(); + + // The pool of buffers + let pool = { + let mtu = if is_streamed { + link_rx.batch.mtu as usize + } else { + link_rx.batch.max_buffer_size() + }; + let mut n = rx_buffer_size / mtu; + if rx_buffer_size % mtu != 0 { + n += 1; + } + zenoh_sync::RecyclingObjectPool::new(n, move || vec![0_u8; mtu].into_boxed_slice()) + }; + + let res = loop { + // Retrieve one buffer + let mut buffer = pool.try_take().unwrap_or_else(|| pool.alloc()); + + tokio::select! 
{ + // Async read from the underlying link + res = tokio::time::timeout(lease, read_with_link(&link_rx, &mut buffer, is_streamed)) => { + let bytes = res.map_err(|_| zerror!("{}: expired after {} milliseconds", link_rx, lease.as_millis()))??; + + #[cfg(feature = "stats")] { + let header_bytes = if is_streamed { 2 } else { 0 }; + c_transport.stats.inc_rx_bytes(header_bytes + bytes); // Account for the batch len encoding (16 bits) + } + + // Deserialize all the messages from the current ZBuf + let zslice = ZSlice::make(Arc::new(buffer), 0, bytes).unwrap(); + c_transport.read_messages(zslice, &link_rx.link).await?; + } + + _ = token.cancelled() => { + break ZResult::Ok(()); + } + } + }; + log::debug!( "[{}] Rx task finished with result {:?}", c_transport.manager.config.zid, @@ -153,21 +203,10 @@ impl TransportUnicastLowlatency { ); let _ = c_transport.finalize(0).await; } - }); - *guard = Some(handle); - } - - pub(super) async fn stop_rx(&self) { - let zid = self.manager.config.zid; - log::debug!("[{}] Stopping rx task...", zid,); - let mut guard = zasyncwrite!(self.handle_rx); - let handle = guard.take(); - drop(guard); + ZResult::Ok(()) + }; - if let Some(handle) = handle { - let _ = handle.cancel().await; - log::debug!("[{}] rx task stopped...", zid,); - } + self.tracker.spawn_on(task, &ZRuntime::TX); } } @@ -177,117 +216,33 @@ impl TransportUnicastLowlatency { async fn keepalive_task( link: Arc>>, keep_alive: Duration, + token: CancellationToken, #[cfg(feature = "stats")] stats: Arc, ) -> ZResult<()> { - loop { - async_std::task::sleep(keep_alive).await; - - let keepailve = TransportMessageLowLatency { - body: TransportBodyLowLatency::KeepAlive(KeepAlive), - }; - - let guard = zasyncwrite!(link); - let link = guard.as_ref().ok_or_else(|| zerror!("No link"))?; - let _ = send_with_link( - link, - keepailve, - #[cfg(feature = "stats")] - &stats, - ) - .await; - drop(guard); - } -} - -async fn rx_task_stream( - link: TransportLinkUnicastRx, - transport: 
TransportUnicastLowlatency, - lease: Duration, - rx_buffer_size: usize, -) -> ZResult<()> { - async fn read(link: &TransportLinkUnicastRx, buffer: &mut [u8]) -> ZResult { - // 16 bits for reading the batch length - let mut length = [0_u8, 0_u8, 0_u8, 0_u8]; - link.link.read_exact(&mut length).await?; - let n = u32::from_le_bytes(length) as usize; - let len = buffer.len(); - let b = buffer.get_mut(0..n).ok_or_else(|| { - zerror!("Batch len is invalid. Received {n} but negotiated max len is {len}.") - })?; - link.link.read_exact(b).await?; - Ok(n) - } - - // The pool of buffers - let mtu = link.batch.mtu as usize; - let mut n = rx_buffer_size / mtu; - if rx_buffer_size % mtu != 0 { - n += 1; - } - - let pool = RecyclingObjectPool::new(n, || vec![0_u8; mtu].into_boxed_slice()); - loop { - // Retrieve one buffer - let mut buffer = pool.try_take().unwrap_or_else(|| pool.alloc()); - - // Async read from the underlying link - let bytes = read(&link, &mut buffer) - .timeout(lease) - .await - .map_err(|_| zerror!("{}: expired after {} milliseconds", link, lease.as_millis()))??; - #[cfg(feature = "stats")] - transport.stats.inc_rx_bytes(2 + bytes); // Account for the batch len encoding (16 bits) - - // Deserialize all the messages from the current ZBuf - let zslice = ZSlice::make(Arc::new(buffer), 0, bytes).unwrap(); - transport.read_messages(zslice, &link.link).await?; - } -} - -async fn rx_task_dgram( - link: TransportLinkUnicastRx, - transport: TransportUnicastLowlatency, - lease: Duration, - rx_buffer_size: usize, -) -> ZResult<()> { - // The pool of buffers - let mtu = link.batch.max_buffer_size(); - let mut n = rx_buffer_size / mtu; - if rx_buffer_size % mtu != 0 { - n += 1; - } + let mut interval = + tokio::time::interval_at(tokio::time::Instant::now() + keep_alive, keep_alive); - let pool = RecyclingObjectPool::new(n, || vec![0_u8; mtu].into_boxed_slice()); loop { - // Retrieve one buffer - let mut buffer = pool.try_take().unwrap_or_else(|| pool.alloc()); - - // 
Async read from the underlying link - let bytes = link - .link - .read(&mut buffer) - .timeout(lease) - .await - .map_err(|_| zerror!("{}: expired after {} milliseconds", link, lease.as_millis()))??; - - #[cfg(feature = "stats")] - transport.stats.inc_rx_bytes(bytes); - - // Deserialize all the messages from the current ZBuf - let zslice = ZSlice::make(Arc::new(buffer), 0, bytes).unwrap(); - transport.read_messages(zslice, &link.link).await?; - } -} + tokio::select! { + _ = interval.tick() => { + let keepailve = TransportMessageLowLatency { + body: TransportBodyLowLatency::KeepAlive(KeepAlive), + }; + + let guard = zasyncwrite!(link); + let link = &guard.as_ref().ok_or_else(|| zerror!("No link"))?.link; + let _ = send_with_link( + link, + keepailve, + #[cfg(feature = "stats")] + &stats, + ) + .await; + drop(guard); + } -async fn rx_task( - link: TransportLinkUnicastRx, - transport: TransportUnicastLowlatency, - lease: Duration, - rx_buffer_size: usize, -) -> ZResult<()> { - if link.link.is_streamed() { - rx_task_stream(link, transport, lease, rx_buffer_size).await - } else { - rx_task_dgram(link, transport, lease, rx_buffer_size).await + _ = token.cancelled() => break, + } } + Ok(()) } diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index afc7d3c849..283c143499 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -21,12 +21,12 @@ use crate::{ }, TransportManager, TransportPeerEventHandler, }; -use async_executor::Task; -use async_std::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard, RwLock}; -use async_std::task::JoinHandle; use async_trait::async_trait; use std::sync::{Arc, RwLock as SyncRwLock}; use std::time::Duration; +use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard, RwLock}; +use tokio_util::sync::CancellationToken; +use tokio_util::task::TaskTracker; use 
zenoh_core::{zasynclock, zasyncread, zasyncwrite, zread, zwrite}; use zenoh_link::Link; use zenoh_protocol::network::NetworkMessage; @@ -59,8 +59,8 @@ pub(crate) struct TransportUnicastLowlatency { pub(super) stats: Arc, // The handles for TX/RX tasks - pub(crate) handle_keepalive: Arc>>>, - pub(crate) handle_rx: Arc>>>, + pub(crate) token: CancellationToken, + pub(crate) tracker: TaskTracker, } impl TransportUnicastLowlatency { @@ -78,8 +78,8 @@ impl TransportUnicastLowlatency { alive: Arc::new(AsyncMutex::new(false)), #[cfg(feature = "stats")] stats, - handle_keepalive: Arc::new(RwLock::new(None)), - handle_rx: Arc::new(RwLock::new(None)), + token: CancellationToken::new(), + tracker: TaskTracker::new(), }) as Arc } @@ -127,8 +127,12 @@ impl TransportUnicastLowlatency { let _ = self.manager.del_transport_unicast(&self.config.zid).await; // Close and drop the link - self.stop_keepalive().await; - self.stop_rx().await; + self.token.cancel(); + self.tracker.close(); + self.tracker.wait().await; + // self.stop_keepalive().await; + // self.stop_rx().await; + if let Some(val) = zasyncwrite!(self.link).as_ref() { let _ = val.close(Some(close::reason::GENERIC)).await; } @@ -170,7 +174,9 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { } fn get_links(&self) -> Vec { - let guard = async_std::task::block_on(async { zasyncread!(self.link) }); + let handle = tokio::runtime::Handle::current(); + let guard = + tokio::task::block_in_place(|| handle.block_on(async { zasyncread!(self.link) })); if let Some(val) = guard.as_ref() { return [val.link()].to_vec(); } @@ -244,13 +250,13 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { // start keepalive task let keep_alive = self.manager.config.unicast.lease / self.manager.config.unicast.keep_alive as u32; - self.start_keepalive(&self.manager.tx_executor, keep_alive); + self.start_keepalive(keep_alive); // start RX task self.internal_start_rx(other_lease); }); - return Ok((start_link, ack)); + Ok((start_link, 
ack)) } /*************************************/ diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index 2328e78a76..eaf25cd2a3 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -27,12 +27,15 @@ use crate::{ }, TransportManager, TransportPeer, }; -use async_std::{ - prelude::FutureExt, - sync::{Mutex, MutexGuard}, - task, +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicUsize, Ordering::SeqCst}, + Arc, + }, + time::Duration, }; -use std::{collections::HashMap, sync::Arc, time::Duration}; +use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionUnicastConf; #[cfg(feature = "shared-memory")] @@ -68,11 +71,11 @@ pub struct TransportManagerConfigUnicast { pub struct TransportManagerStateUnicast { // Incoming uninitialized transports - pub(super) incoming: Arc>, + pub(super) incoming: Arc, // Established listeners - pub(super) protocols: Arc>>, + pub(super) protocols: Arc>>, // Established transports - pub(super) transports: Arc>>>, + pub(super) transports: Arc>>>, // Multilink #[cfg(feature = "transport_multilink")] pub(super) multilink: Arc, @@ -230,9 +233,9 @@ impl TransportManagerBuilderUnicast { }; let state = TransportManagerStateUnicast { - incoming: Arc::new(Mutex::new(0)), - protocols: Arc::new(Mutex::new(HashMap::new())), - transports: Arc::new(Mutex::new(HashMap::new())), + incoming: Arc::new(AtomicUsize::new(0)), + protocols: Arc::new(AsyncMutex::new(HashMap::new())), + transports: Arc::new(AsyncMutex::new(HashMap::new())), #[cfg(feature = "transport_multilink")] multilink: Arc::new(MultiLink::make(prng)?), #[cfg(feature = "shared-memory")] @@ -299,7 +302,7 @@ impl TransportManager { .collect::>>(); for pl in pl_guard.drain(..) 
{ - for ep in pl.get_listeners().iter() { + for ep in pl.get_listeners().await.iter() { let _ = pl.del_listener(ep).await; } } @@ -388,7 +391,7 @@ impl TransportManager { .get_link_manager_unicast(endpoint.protocol().as_str()) .await?; lm.del_listener(endpoint).await?; - if lm.get_listeners().is_empty() { + if lm.get_listeners().await.is_empty() { self.del_link_manager_unicast(endpoint.protocol().as_str()) .await?; } @@ -398,7 +401,7 @@ impl TransportManager { pub async fn get_listeners_unicast(&self) -> Vec { let mut vec: Vec = vec![]; for p in zasynclock!(self.state.unicast.protocols).values() { - vec.extend_from_slice(&p.get_listeners()); + vec.extend_from_slice(&p.get_listeners().await); } vec } @@ -406,7 +409,7 @@ impl TransportManager { pub async fn get_locators_unicast(&self) -> Vec { let mut vec: Vec = vec![]; for p in zasynclock!(self.state.unicast.protocols).values() { - vec.extend_from_slice(&p.get_locators()); + vec.extend_from_slice(&p.get_locators().await); } vec } @@ -501,7 +504,7 @@ impl TransportManager { link: LinkUnicastWithOpenAck, other_initial_sn: TransportSn, other_lease: Duration, - mut guard: MutexGuard<'_, HashMap>>, + mut guard: AsyncMutexGuard<'_, HashMap>>, ) -> InitTransportResult { macro_rules! link_error { ($s:expr, $reason:expr) => { @@ -723,8 +726,8 @@ impl TransportManager { } pub(crate) async fn handle_new_link_unicast(&self, link: LinkUnicast) { - let mut guard = zasynclock!(self.state.unicast.incoming); - if *guard >= self.config.unicast.accept_pending { + let incoming_counter = self.state.unicast.incoming.clone(); + if incoming_counter.load(SeqCst) >= self.config.unicast.accept_pending { // We reached the limit of concurrent incoming transport, this means two things: // - the values configured for ZN_OPEN_INCOMING_PENDING and ZN_OPEN_TIMEOUT // are too small for the scenario zenoh is deployed in; @@ -737,20 +740,20 @@ impl TransportManager { // A new link is available log::trace!("Accepting link... 
{}", link); - *guard += 1; - drop(guard); + self.state.unicast.incoming.fetch_add(1, SeqCst); // Spawn a task to accept the link let c_manager = self.clone(); - task::spawn(async move { - if let Err(e) = super::establishment::accept::accept_link(link, &c_manager) - .timeout(c_manager.config.unicast.accept_timeout) - .await + zenoh_runtime::ZRuntime::Acceptor.spawn(async move { + if let Err(e) = tokio::time::timeout( + c_manager.config.unicast.accept_timeout, + super::establishment::accept::accept_link(link, &c_manager), + ) + .await { log::debug!("{}", e); } - let mut guard = zasynclock!(c_manager.state.unicast.incoming); - *guard -= 1; + incoming_counter.fetch_sub(1, SeqCst); }); } } diff --git a/io/zenoh-transport/src/unicast/shared_memory_unicast.rs b/io/zenoh-transport/src/unicast/shared_memory_unicast.rs index ce940444af..881e6886d2 100644 --- a/io/zenoh-transport/src/unicast/shared_memory_unicast.rs +++ b/io/zenoh-transport/src/unicast/shared_memory_unicast.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::sync::RwLock; use rand::{Rng, SeedableRng}; +use tokio::sync::RwLock; use zenoh_core::zerror; use zenoh_crypto::PseudoRng; use zenoh_result::ZResult; diff --git a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs index 92093959dd..f6dc39529d 100644 --- a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs +++ b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs @@ -16,9 +16,9 @@ use crate::{ unicast::{link::TransportLinkUnicast, TransportConfigUnicast}, TransportPeerEventHandler, }; -use async_std::sync::MutexGuard as AsyncMutexGuard; use async_trait::async_trait; use std::{fmt::DebugStruct, sync::Arc, time::Duration}; +use tokio::sync::MutexGuard as AsyncMutexGuard; use zenoh_link::Link; use zenoh_protocol::{ core::{WhatAmI, ZenohId}, diff --git a/io/zenoh-transport/src/unicast/universal/link.rs 
b/io/zenoh-transport/src/unicast/universal/link.rs index 513cefc0a6..5c07b69738 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -12,8 +12,6 @@ // ZettaScale Zenoh Team, // use super::transport::TransportUnicastUniversal; -#[cfg(feature = "stats")] -use crate::common::stats::TransportStats; use crate::{ common::{ batch::{BatchConfig, RBatch}, @@ -24,27 +22,15 @@ use crate::{ priority::TransportPriorityTx, }, unicast::link::{TransportLinkUnicast, TransportLinkUnicastRx, TransportLinkUnicastTx}, - TransportExecutor, -}; -use async_std::prelude::FutureExt; -use async_std::task; -use async_std::task::JoinHandle; -use std::{ - sync::{Arc, RwLock}, - time::Duration, }; +use std::time::Duration; +use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh_buffers::ZSliceBuffer; -use zenoh_core::zwrite; use zenoh_protocol::transport::{KeepAlive, TransportMessage}; use zenoh_result::{zerror, ZResult}; -use zenoh_sync::{RecyclingObject, RecyclingObjectPool, Signal}; - -pub(super) struct Tasks { - // The handlers to stop TX/RX tasks - handle_tx: RwLock>>, - signal_rx: Signal, - handle_rx: RwLock>>, -} +use zenoh_sync::{RecyclingObject, RecyclingObjectPool}; +#[cfg(feature = "stats")] +use {crate::common::stats::TransportStats, std::sync::Arc}; #[derive(Clone)] pub(super) struct TransportLinkUnicastUniversal { @@ -53,7 +39,8 @@ pub(super) struct TransportLinkUnicastUniversal { // The transmission pipeline pub(super) pipeline: TransmissionPipelineProducer, // The task handling substruct - tasks: Arc, + tracker: TaskTracker, + token: CancellationToken, } impl TransportLinkUnicastUniversal { @@ -78,105 +65,92 @@ impl TransportLinkUnicastUniversal { // The pipeline let (producer, consumer) = TransmissionPipeline::make(config, priority_tx); - let tasks = Arc::new(Tasks { - handle_tx: RwLock::new(None), - signal_rx: Signal::new(), - handle_rx: RwLock::new(None), - }); - let result = Self { link, 
pipeline: producer, - tasks, + tracker: TaskTracker::new(), + token: CancellationToken::new(), }; (result, consumer) } -} -impl TransportLinkUnicastUniversal { pub(super) fn start_tx( &mut self, transport: TransportUnicastUniversal, consumer: TransmissionPipelineConsumer, - executor: &TransportExecutor, keep_alive: Duration, ) { - let mut guard = zwrite!(self.tasks.handle_tx); - if guard.is_none() { - // Spawn the TX task - let mut tx = self.link.tx(); - let handle = executor.spawn(async move { - let res = tx_task( - consumer, - &mut tx, - keep_alive, - #[cfg(feature = "stats")] - transport.stats.clone(), - ) - .await; - if let Err(e) = res { - log::debug!("{}", e); - // Spawn a task to avoid a deadlock waiting for this same task - // to finish in the close() joining its handle - task::spawn(async move { transport.del_link(tx.inner.link()).await }); - } - }); - *guard = Some(handle); - } - } + // Spawn the TX task + let mut tx = self.link.tx(); + let token = self.token.clone(); + let task = async move { + let res = tx_task( + consumer, + &mut tx, + keep_alive, + token, + #[cfg(feature = "stats")] + transport.stats.clone(), + ) + .await; - pub(super) fn stop_tx(&mut self) { - self.pipeline.disable(); + if let Err(e) = res { + log::debug!("{}", e); + // Spawn a task to avoid a deadlock waiting for this same task + // to finish in the close() joining its handle + // TODO(yuyuan): do more study to check which ZRuntime should be used or refine the + // termination + zenoh_runtime::ZRuntime::TX + .spawn(async move { transport.del_link(tx.inner.link()).await }); + } + }; + self.tracker.spawn_on(task, &zenoh_runtime::ZRuntime::TX); } pub(super) fn start_rx(&mut self, transport: TransportUnicastUniversal, lease: Duration) { - let mut guard = zwrite!(self.tasks.handle_rx); - if guard.is_none() { - // Spawn the RX task - let mut rx = self.link.rx(); - let c_signal = self.tasks.signal_rx.clone(); - - let handle = task::spawn(async move { - // Start the consume task - let res = 
rx_task( - &mut rx, - transport.clone(), - lease, - c_signal.clone(), - transport.manager.config.link_rx_buffer_size, - ) - .await; - c_signal.trigger(); - if let Err(e) = res { - log::debug!("{}", e); - // Spawn a task to avoid a deadlock waiting for this same task - // to finish in the close() joining its handle - task::spawn(async move { transport.del_link((&rx.link).into()).await }); - } - }); - *guard = Some(handle); - } - } + let mut rx = self.link.rx(); + let token = self.token.clone(); + let task = async move { + // Start the consume task + let res = rx_task( + &mut rx, + transport.clone(), + lease, + transport.manager.config.link_rx_buffer_size, + token, + ) + .await; + + // TODO(yuyuan): improve this callback + if let Err(e) = res { + log::debug!("{}", e); + + // Spawn a task to avoid a deadlock waiting for this same task + // to finish in the close() joining its handle + // WARN: Must be spawned on RX + zenoh_runtime::ZRuntime::RX + .spawn(async move { transport.del_link((&rx.link).into()).await }); - pub(super) fn stop_rx(&mut self) { - self.tasks.signal_rx.trigger(); + // // WARN: This ZRuntime blocks + // zenoh_runtime::ZRuntime::Net + // .spawn(async move { transport.del_link((&rx.link).into()).await }); + + // // WARN: This cloud block + // transport.del_link((&rx.link).into()).await; + } + }; + // WARN: If this is on ZRuntime::TX, a deadlock would occur. 
+ self.tracker.spawn_on(task, &zenoh_runtime::ZRuntime::RX); } - pub(super) async fn close(mut self) -> ZResult<()> { + pub(super) async fn close(self) -> ZResult<()> { log::trace!("{}: closing", self.link); - self.stop_tx(); - self.stop_rx(); - - let handle_tx = zwrite!(self.tasks.handle_tx).take(); - if let Some(handle) = handle_tx { - handle.await; - } - let handle_rx = zwrite!(self.tasks.handle_rx).take(); - if let Some(handle) = handle_rx { - handle.await; - } + self.tracker.close(); + self.token.cancel(); + self.pipeline.disable(); + self.tracker.wait().await; self.link.close(None).await } @@ -189,12 +163,15 @@ async fn tx_task( mut pipeline: TransmissionPipelineConsumer, link: &mut TransportLinkUnicastTx, keep_alive: Duration, + token: CancellationToken, #[cfg(feature = "stats")] stats: Arc, ) -> ZResult<()> { + let mut interval = + tokio::time::interval_at(tokio::time::Instant::now() + keep_alive, keep_alive); loop { - match pipeline.pull().timeout(keep_alive).await { - Ok(res) => match res { - Some((mut batch, priority)) => { + tokio::select! { + res = pipeline.pull() => { + if let Some((mut batch, priority)) = res { link.send_batch(&mut batch).await?; #[cfg(feature = "stats")] @@ -205,28 +182,32 @@ async fn tx_task( // Reinsert the batch into the queue pipeline.refill(batch, priority); + } else { + break } - None => break, - }, - Err(_) => { + } + + _ = interval.tick() => { let message: TransportMessage = KeepAlive.into(); #[allow(unused_variables)] // Used when stats feature is enabled let n = link.send(&message).await?; + #[cfg(feature = "stats")] { stats.inc_tx_t_msgs(1); stats.inc_tx_bytes(n); } } + + _ = token.cancelled() => break } } // Drain the transmission pipeline and write remaining bytes on the wire let mut batches = pipeline.drain(); for (mut b, _) in batches.drain(..) 
{ - link.send_batch(&mut b) - .timeout(keep_alive) + tokio::time::timeout(keep_alive, link.send_batch(&mut b)) .await .map_err(|_| zerror!("{}: flush failed after {} ms", link, keep_alive.as_millis()))??; @@ -244,18 +225,13 @@ async fn rx_task( link: &mut TransportLinkUnicastRx, transport: TransportUnicastUniversal, lease: Duration, - signal: Signal, rx_buffer_size: usize, + token: CancellationToken, ) -> ZResult<()> { - enum Action { - Read(RBatch), - Stop, - } - async fn read( link: &mut TransportLinkUnicastRx, pool: &RecyclingObjectPool, - ) -> ZResult + ) -> ZResult where T: ZSliceBuffer + 'static, F: Fn() -> T, @@ -264,12 +240,7 @@ async fn rx_task( let batch = link .recv_batch(|| pool.try_take().unwrap_or_else(|| pool.alloc())) .await?; - Ok(Action::Read(batch)) - } - - async fn stop(signal: Signal) -> ZResult { - signal.wait().await; - Ok(Action::Stop) + Ok(batch) } // The pool of buffers @@ -281,22 +252,19 @@ async fn rx_task( let pool = RecyclingObjectPool::new(n, || vec![0_u8; mtu].into_boxed_slice()); let l = (&link.link).into(); - while !signal.is_triggered() { - // Async read from the underlying link - let action = read(link, &pool) - .race(stop(signal.clone())) - .timeout(lease) - .await - .map_err(|_| zerror!("{}: expired after {} milliseconds", link, lease.as_millis()))??; - match action { - Action::Read(batch) => { + + loop { + tokio::select! 
{ + batch = tokio::time::timeout(lease, read(link, &pool)) => { + let batch = batch.map_err(|_| zerror!("{}: expired after {} milliseconds", link, lease.as_millis()))??; #[cfg(feature = "stats")] { transport.stats.inc_rx_bytes(2 + n); // Account for the batch len encoding (16 bits) } transport.read_messages(batch, &l)?; } - Action::Stop => break, + + _ = token.cancelled() => break } } diff --git a/io/zenoh-transport/src/unicast/universal/rx.rs b/io/zenoh-transport/src/unicast/universal/rx.rs index 935a1814b0..9dfe075956 100644 --- a/io/zenoh-transport/src/unicast/universal/rx.rs +++ b/io/zenoh-transport/src/unicast/universal/rx.rs @@ -20,7 +20,6 @@ use crate::{ unicast::transport_unicast_inner::TransportUnicastTrait, TransportPeerEventHandler, }; -use async_std::task; use std::sync::MutexGuard; use zenoh_core::{zlock, zread}; use zenoh_link::Link; @@ -51,15 +50,12 @@ impl TransportUnicastUniversal { } fn handle_close(&self, link: &Link, _reason: u8, session: bool) -> ZResult<()> { - // Stop now rx and tx tasks before doing the proper cleanup - let _ = self.stop_rx_tx(link); - // Delete and clean up let c_transport = self.clone(); let c_link = link.clone(); // Spawn a task to avoid a deadlock waiting for this same task // to finish in the link close() joining the rx handle - task::spawn(async move { + zenoh_runtime::ZRuntime::Net.spawn(async move { if session { let _ = c_transport.delete().await; } else { diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs b/io/zenoh-transport/src/unicast/universal/transport.rs index 942b723365..30e1bd2ecd 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -23,11 +23,11 @@ use crate::{ }, TransportManager, TransportPeerEventHandler, }; -use async_std::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; use async_trait::async_trait; use std::fmt::DebugStruct; use std::sync::{Arc, RwLock}; use std::time::Duration; +use 
tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; use zenoh_core::{zasynclock, zcondfeat, zread, zwrite}; use zenoh_link::Link; use zenoh_protocol::{ @@ -44,13 +44,6 @@ macro_rules! zlinkget { }; } -macro_rules! zlinkgetmut { - ($guard:expr, $link:expr) => { - // Compare LinkUnicast link to not compare TransportLinkUnicast direction - $guard.iter_mut().find(|tl| tl.link == $link) - }; -} - macro_rules! zlinkindex { ($guard:expr, $link:expr) => { // Compare LinkUnicast link to not compare TransportLinkUnicast direction @@ -140,6 +133,7 @@ impl TransportUnicastUniversal { self.manager.config.zid, self.config.zid ); + // Mark the transport as no longer alive and keep the lock // to avoid concurrent new_transport and closing/closed notifications let mut a_guard = self.get_alive().await; @@ -217,24 +211,6 @@ impl TransportUnicastUniversal { } } - pub(crate) fn stop_rx_tx(&self, link: &Link) -> ZResult<()> { - let mut guard = zwrite!(self.links); - match zlinkgetmut!(guard, *link) { - Some(l) => { - l.stop_rx(); - l.stop_tx(); - Ok(()) - } - None => { - bail!( - "Can not stop Link RX {} with peer: {}", - link, - self.config.zid - ) - } - } - } - async fn sync(&self, initial_sn_rx: TransportSn) -> ZResult<()> { // Mark the transport as alive and keep the lock // to avoid concurrent new_transport and closing/closed notifications @@ -327,12 +303,7 @@ impl TransportUnicastTrait for TransportUnicastUniversal { // Start the TX loop let keep_alive = self.manager.config.unicast.lease / self.manager.config.unicast.keep_alive as u32; - link.start_tx( - transport.clone(), - consumer, - &self.manager.tx_executor, - keep_alive, - ); + link.start_tx(transport.clone(), consumer, keep_alive); // Start the RX loop link.start_rx(transport, other_lease); diff --git a/io/zenoh-transport/src/unicast/universal/tx.rs b/io/zenoh-transport/src/unicast/universal/tx.rs index eb41e2611c..67c783c530 100644 --- a/io/zenoh-transport/src/unicast/universal/tx.rs +++ 
b/io/zenoh-transport/src/unicast/universal/tx.rs @@ -31,17 +31,13 @@ impl TransportUnicastUniversal { let guard = zread!(self.links); // First try to find the best match between msg and link reliability - if let Some(pl) = guard - .iter() - .filter_map(|tl| { - if msg.is_reliable() == tl.link.link.is_reliable() { - Some(&tl.pipeline) - } else { - None - } - }) - .next() - { + if let Some(pl) = guard.iter().find_map(|tl| { + if msg.is_reliable() == tl.link.link.is_reliable() { + Some(&tl.pipeline) + } else { + None + } + }) { zpush!(guard, pl, msg); } diff --git a/io/zenoh-transport/tests/endpoints.rs b/io/zenoh-transport/tests/endpoints.rs index 2ac2084552..2f4335ca31 100644 --- a/io/zenoh-transport/tests/endpoints.rs +++ b/io/zenoh-transport/tests/endpoints.rs @@ -11,9 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::{prelude::FutureExt, task}; use std::{any::Any, convert::TryFrom, sync::Arc, time::Duration}; -use zenoh_core::zasync_executor_init; +use zenoh_core::ztimeout; use zenoh_link::{EndPoint, Link}; use zenoh_protocol::{ core::{WhatAmI, ZenohId}, @@ -30,12 +29,6 @@ const SLEEP: Duration = Duration::from_millis(100); const RUNS: usize = 10; -macro_rules! 
ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; -} - // Transport Handler #[derive(Default)] struct SH; @@ -91,7 +84,7 @@ async fn run(endpoints: &[EndPoint]) { ztimeout!(sm.add_listener(e.clone())).unwrap(); } - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; // Delete the listeners for e in endpoints.iter() { @@ -99,52 +92,40 @@ async fn run(endpoints: &[EndPoint]) { ztimeout!(sm.del_listener(e)).unwrap(); } - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } } #[cfg(feature = "transport_tcp")] -#[test] -fn endpoint_tcp() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn endpoint_tcp() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - // Define the locators let endpoints: Vec = vec![ format!("tcp/127.0.0.1:{}", 7000).parse().unwrap(), format!("tcp/[::1]:{}", 7001).parse().unwrap(), format!("tcp/localhost:{}", 7002).parse().unwrap(), ]; - task::block_on(run(&endpoints)); + run(&endpoints).await; } #[cfg(feature = "transport_udp")] -#[test] -fn endpoint_udp() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn endpoint_udp() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - // Define the locators let endpoints: Vec = vec![ format!("udp/127.0.0.1:{}", 7010).parse().unwrap(), format!("udp/[::1]:{}", 7011).parse().unwrap(), format!("udp/localhost:{}", 7012).parse().unwrap(), ]; - task::block_on(run(&endpoints)); + run(&endpoints).await; } #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] -#[test] -fn endpoint_unix() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn endpoint_unix() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - // Remove the files if they still exists let f1 = "zenoh-test-unix-socket-0.sock"; let f2 = "zenoh-test-unix-socket-1.sock"; @@ -155,7 +136,7 @@ fn endpoint_unix() { 
format!("unixsock-stream/{f1}").parse().unwrap(), format!("unixsock-stream/{f2}").parse().unwrap(), ]; - task::block_on(run(&endpoints)); + run(&endpoints).await; let _ = std::fs::remove_file(f1); let _ = std::fs::remove_file(f2); let _ = std::fs::remove_file(format!("{f1}.lock")); @@ -163,30 +144,22 @@ fn endpoint_unix() { } #[cfg(feature = "transport_ws")] -#[test] -fn endpoint_ws() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn endpoint_ws() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - // Define the locators let endpoints: Vec = vec![ format!("ws/127.0.0.1:{}", 7020).parse().unwrap(), format!("ws/[::1]:{}", 7021).parse().unwrap(), format!("ws/localhost:{}", 7022).parse().unwrap(), ]; - task::block_on(run(&endpoints)); + run(&endpoints).await; } #[cfg(feature = "transport_unixpipe")] -#[test] -fn endpoint_unixpipe() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn endpoint_unixpipe() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - // Define the locators let endpoints: Vec = vec![ "unixpipe/endpoint_unixpipe".parse().unwrap(), @@ -194,17 +167,13 @@ fn endpoint_unixpipe() { "unixpipe/endpoint_unixpipe3".parse().unwrap(), "unixpipe/endpoint_unixpipe4".parse().unwrap(), ]; - task::block_on(run(&endpoints)); + run(&endpoints).await; } #[cfg(all(feature = "transport_tcp", feature = "transport_udp"))] -#[test] -fn endpoint_tcp_udp() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn endpoint_tcp_udp() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - // Define the locators let endpoints: Vec = vec![ format!("tcp/127.0.0.1:{}", 7030).parse().unwrap(), @@ -212,7 +181,7 @@ fn endpoint_tcp_udp() { format!("tcp/[::1]:{}", 7032).parse().unwrap(), format!("udp/[::1]:{}", 7033).parse().unwrap(), ]; - task::block_on(run(&endpoints)); + run(&endpoints).await; } #[cfg(all( 
@@ -221,13 +190,9 @@ fn endpoint_tcp_udp() { feature = "transport_unixsock-stream", target_family = "unix" ))] -#[test] -fn endpoint_tcp_udp_unix() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn endpoint_tcp_udp_unix() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - // Remove the file if it still exists let f1 = "zenoh-test-unix-socket-2.sock"; let _ = std::fs::remove_file(f1); @@ -239,7 +204,7 @@ fn endpoint_tcp_udp_unix() { format!("udp/[::1]:{}", 7043).parse().unwrap(), format!("unixsock-stream/{f1}").parse().unwrap(), ]; - task::block_on(run(&endpoints)); + run(&endpoints).await; let _ = std::fs::remove_file(f1); let _ = std::fs::remove_file(format!("{f1}.lock")); } @@ -249,13 +214,9 @@ fn endpoint_tcp_udp_unix() { feature = "transport_unixsock-stream", target_family = "unix" ))] -#[test] -fn endpoint_tcp_unix() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn endpoint_tcp_unix() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - // Remove the file if it still exists let f1 = "zenoh-test-unix-socket-3.sock"; let _ = std::fs::remove_file(f1); @@ -265,7 +226,7 @@ fn endpoint_tcp_unix() { format!("tcp/[::1]:{}", 7051).parse().unwrap(), format!("unixsock-stream/{f1}").parse().unwrap(), ]; - task::block_on(run(&endpoints)); + run(&endpoints).await; let _ = std::fs::remove_file(f1); let _ = std::fs::remove_file(format!("{f1}.lock")); } @@ -275,13 +236,9 @@ fn endpoint_tcp_unix() { feature = "transport_unixsock-stream", target_family = "unix" ))] -#[test] -fn endpoint_udp_unix() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn endpoint_udp_unix() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - // Remove the file if it still exists let f1 = "zenoh-test-unix-socket-4.sock"; let _ = std::fs::remove_file(f1); // Define the locators @@ -290,20 +247,17 @@ fn 
endpoint_udp_unix() { format!("udp/[::1]:{}", 7061).parse().unwrap(), format!("unixsock-stream/{f1}").parse().unwrap(), ]; - task::block_on(run(&endpoints)); + run(&endpoints).await; let _ = std::fs::remove_file(f1); let _ = std::fs::remove_file(format!("{f1}.lock")); } #[cfg(feature = "transport_tls")] -#[test] -fn endpoint_tls() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn endpoint_tls() { use zenoh_link::tls::config::*; let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real @@ -374,18 +328,15 @@ AXVFFIgCSluyrolaD6CWD9MqOex4YOfJR2bNxI7lFvuK4AwjyUJzT1U1HXib17mM .unwrap(); let endpoints = vec![endpoint]; - task::block_on(run(&endpoints)); + run(&endpoints).await; } #[cfg(feature = "transport_quic")] -#[test] -fn endpoint_quic() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn endpoint_quic() { use zenoh_link::quic::config::*; let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real @@ -455,5 +406,5 @@ AXVFFIgCSluyrolaD6CWD9MqOex4YOfJR2bNxI7lFvuK4AwjyUJzT1U1HXib17mM ) .unwrap(); let endpoints = vec![endpoint]; - task::block_on(run(&endpoints)); + run(&endpoints).await; } diff --git a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs index f8e56a5484..5d0c9ef9ae 100644 --- a/io/zenoh-transport/tests/multicast_compression.rs +++ b/io/zenoh-transport/tests/multicast_compression.rs @@ -16,7 +16,6 @@ // on GitHub CI actions on Linux and Windows. 
#[cfg(all(target_family = "unix", feature = "transport_compression"))] mod tests { - use async_std::{prelude::FutureExt, task}; use std::{ any::Any, sync::{ @@ -25,7 +24,7 @@ mod tests { }, time::Duration, }; - use zenoh_core::zasync_executor_init; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ core::{ @@ -55,12 +54,6 @@ mod tests { const MSG_COUNT: usize = 1_000; const MSG_SIZE_NOFRAG: [usize; 1] = [1_024]; - macro_rules! ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; - } - // Transport Handler for the peer02 struct SHPeer { count: Arc, @@ -190,7 +183,7 @@ mod tests { .await .is_none() { - task::sleep(SLEEP_COUNT).await; + tokio::time::sleep(SLEEP_COUNT).await; } }); let peer01_transport = peer01_manager @@ -208,7 +201,7 @@ mod tests { .await .is_none() { - task::sleep(SLEEP_COUNT).await; + tokio::time::sleep(SLEEP_COUNT).await; } }); let peer02_transport = peer02_manager @@ -245,7 +238,7 @@ mod tests { assert!(peer01.manager.get_transports_multicast().await.is_empty()); ztimeout!(async { while !peer02.transport.get_peers().unwrap().is_empty() { - task::sleep(SLEEP_COUNT).await; + tokio::time::sleep(SLEEP_COUNT).await; } }); @@ -255,7 +248,7 @@ mod tests { assert!(peer02.manager.get_transports_multicast().await.is_empty()); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } async fn test_transport( @@ -293,21 +286,21 @@ mod tests { Reliability::Reliable => { ztimeout!(async { while peer02.handler.get_count() != MSG_COUNT { - task::sleep(SLEEP_COUNT).await; + tokio::time::sleep(SLEEP_COUNT).await; } }); } Reliability::BestEffort => { ztimeout!(async { while peer02.handler.get_count() == 0 { - task::sleep(SLEEP_COUNT).await; + tokio::time::sleep(SLEEP_COUNT).await; } }); } }; // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } async fn run_single(endpoint: &EndPoint, channel: Channel, msg_size: usize) { @@ -336,14 +329,10 @@ mod tests { } 
#[cfg(feature = "transport_udp")] - #[test] - fn transport_multicast_compression_udp_only() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn transport_multicast_compression_udp_only() { env_logger::init(); - task::block_on(async { - zasync_executor_init!(); - }); - // Define the locator let endpoints: Vec = vec![ format!( @@ -372,6 +361,6 @@ mod tests { }, ]; // Run - task::block_on(run(&endpoints, &channel, &MSG_SIZE_NOFRAG)); + run(&endpoints, &channel, &MSG_SIZE_NOFRAG).await; } } diff --git a/io/zenoh-transport/tests/multicast_transport.rs b/io/zenoh-transport/tests/multicast_transport.rs index ebb290af1e..96525c263c 100644 --- a/io/zenoh-transport/tests/multicast_transport.rs +++ b/io/zenoh-transport/tests/multicast_transport.rs @@ -16,7 +16,6 @@ // on GitHub CI actions on Linux and Windows. #[cfg(target_family = "unix")] mod tests { - use async_std::{prelude::FutureExt, task}; use std::{ any::Any, sync::{ @@ -25,7 +24,7 @@ mod tests { }, time::Duration, }; - use zenoh_core::zasync_executor_init; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ core::{ @@ -53,12 +52,6 @@ mod tests { const MSG_COUNT: usize = 1_000; const MSG_SIZE_NOFRAG: [usize; 1] = [1_024]; - macro_rules! 
ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; - } - // Transport Handler for the peer02 struct SHPeer { count: Arc, @@ -186,7 +179,7 @@ mod tests { .await .is_none() { - task::sleep(SLEEP_COUNT).await; + tokio::time::sleep(SLEEP_COUNT).await; } }); let peer01_transport = peer01_manager @@ -204,7 +197,7 @@ mod tests { .await .is_none() { - task::sleep(SLEEP_COUNT).await; + tokio::time::sleep(SLEEP_COUNT).await; } }); let peer02_transport = peer02_manager @@ -241,7 +234,7 @@ mod tests { assert!(peer01.manager.get_transports_multicast().await.is_empty()); ztimeout!(async { while !peer02.transport.get_peers().unwrap().is_empty() { - task::sleep(SLEEP_COUNT).await; + tokio::time::sleep(SLEEP_COUNT).await; } }); @@ -251,7 +244,7 @@ mod tests { assert!(peer02.manager.get_transports_multicast().await.is_empty()); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } async fn test_transport( @@ -289,21 +282,21 @@ mod tests { Reliability::Reliable => { ztimeout!(async { while peer02.handler.get_count() != MSG_COUNT { - task::sleep(SLEEP_COUNT).await; + tokio::time::sleep(SLEEP_COUNT).await; } }); } Reliability::BestEffort => { ztimeout!(async { while peer02.handler.get_count() == 0 { - task::sleep(SLEEP_COUNT).await; + tokio::time::sleep(SLEEP_COUNT).await; } }); } }; // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } async fn run_single(endpoint: &EndPoint, channel: Channel, msg_size: usize) { @@ -332,14 +325,10 @@ mod tests { } #[cfg(all(feature = "transport_compression", feature = "transport_udp"))] - #[test] - fn transport_multicast_udp_only() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn transport_multicast_udp_only() { env_logger::init(); - task::block_on(async { - zasync_executor_init!(); - }); - // Define the locator let endpoints: Vec = vec![ format!( @@ -368,6 +357,6 @@ mod tests { }, ]; // Run - task::block_on(run(&endpoints, &channel, 
&MSG_SIZE_NOFRAG)); + run(&endpoints, &channel, &MSG_SIZE_NOFRAG).await; } } diff --git a/io/zenoh-transport/tests/transport_whitelist.rs b/io/zenoh-transport/tests/transport_whitelist.rs index 5a929ed18c..da7ec67703 100644 --- a/io/zenoh-transport/tests/transport_whitelist.rs +++ b/io/zenoh-transport/tests/transport_whitelist.rs @@ -11,9 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::{prelude::FutureExt, task}; use std::{any::Any, convert::TryFrom, iter::FromIterator, sync::Arc, time::Duration}; -use zenoh_core::zasync_executor_init; +use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ core::{EndPoint, ZenohId}, @@ -28,12 +27,6 @@ use zenoh_transport::{ const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); -macro_rules! ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; -} - // Transport Handler for the router struct SHRouter; @@ -112,22 +105,19 @@ async fn run(endpoints: &[EndPoint]) { println!("Listener endpoint: {e}"); let _ = ztimeout!(router_manager.add_listener_unicast(e.clone())).unwrap(); - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; println!("Open endpoint: {e}"); let _ = ztimeout!(router_manager.open_transport_unicast(e.clone())).unwrap(); - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } } #[cfg(feature = "transport_tcp")] -#[test] -fn transport_whitelist_tcp() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_whitelist_tcp() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locators let endpoints: Vec = vec![ @@ -135,17 +125,14 @@ fn transport_whitelist_tcp() { format!("tcp/[::1]:{}", 17001).parse().unwrap(), ]; // Run - task::block_on(run(&endpoints)); + run(&endpoints).await; } #[cfg(feature = "transport_unixpipe")] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn 
transport_whitelist_unixpipe() { +async fn transport_whitelist_unixpipe() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locators let endpoints: Vec = vec![ @@ -153,5 +140,5 @@ fn transport_whitelist_unixpipe() { "unixpipe/transport_whitelist_unixpipe2".parse().unwrap(), ]; // Run - task::block_on(run(&endpoints)); + run(&endpoints).await; } diff --git a/io/zenoh-transport/tests/unicast_authenticator.rs b/io/zenoh-transport/tests/unicast_authenticator.rs index 51e78d4ee8..d94ade1ce1 100644 --- a/io/zenoh-transport/tests/unicast_authenticator.rs +++ b/io/zenoh-transport/tests/unicast_authenticator.rs @@ -11,9 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::{prelude::FutureExt, task}; use std::{any::Any, sync::Arc, time::Duration}; -use zenoh_core::{zasync_executor_init, zasyncwrite}; +use zenoh_core::{zasyncwrite, ztimeout}; use zenoh_link::Link; use zenoh_protocol::{ core::{EndPoint, WhatAmI, ZenohId}, @@ -32,12 +31,6 @@ use zenoh_transport::{ const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_millis(100); -macro_rules! 
ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; -} - #[cfg(test)] struct SHRouterAuthenticator; @@ -322,7 +315,7 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { // Add the locator on the router ztimeout!(router_manager.add_listener(endpoint.clone())).unwrap(); println!("Transport Authenticator PubKey [1a2]"); - let locators = router_manager.get_listeners(); + let locators = router_manager.get_listeners().await; println!("Transport Authenticator PubKey [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); @@ -388,7 +381,7 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { ztimeout!(async { while !router_manager.get_transports_unicast().await.is_empty() { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -400,8 +393,8 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { assert!(res.is_ok()); ztimeout!(async { - while !router_manager.get_listeners().is_empty() { - task::sleep(SLEEP).await; + while !router_manager.get_listeners().await.is_empty() { + tokio::time::sleep(SLEEP).await; } }); @@ -411,7 +404,7 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { ztimeout!(router_manager.close()); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } #[cfg(feature = "auth_usrpwd")] @@ -527,7 +520,7 @@ async fn auth_usrpwd(endpoint: &EndPoint, lowlatency_transport: bool) { println!("Transport Authenticator UserPassword [1a1]: {res:?}"); assert!(res.is_ok()); println!("Transport Authenticator UserPassword [1a2]"); - let locators = router_manager.get_listeners(); + let locators = router_manager.get_listeners().await; println!("Transport Authenticator UserPassword [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); @@ -548,7 +541,7 @@ async fn auth_usrpwd(endpoint: &EndPoint, lowlatency_transport: bool) { ztimeout!(async { while !router_manager.get_transports_unicast().await.is_empty() { - 
task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -605,7 +598,7 @@ async fn auth_usrpwd(endpoint: &EndPoint, lowlatency_transport: bool) { ztimeout!(async { while !router_manager.get_transports_unicast().await.is_empty() { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -617,13 +610,13 @@ async fn auth_usrpwd(endpoint: &EndPoint, lowlatency_transport: bool) { assert!(res.is_ok()); ztimeout!(async { - while !router_manager.get_listeners().is_empty() { - task::sleep(SLEEP).await; + while !router_manager.get_listeners().await.is_empty() { + tokio::time::sleep(SLEEP).await; } }); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } async fn run(endpoint: &EndPoint, lowlatency_transport: bool) { @@ -642,132 +635,93 @@ async fn run_with_lowlatency_transport(endpoint: &EndPoint) { } #[cfg(feature = "transport_tcp")] -#[test] -fn authenticator_tcp() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn authenticator_tcp() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 8000).parse().unwrap(); - task::block_on(run_with_universal_transport(&endpoint)); + run_with_universal_transport(&endpoint).await; } #[cfg(feature = "transport_tcp")] -#[test] -fn authenticator_tcp_with_lowlatency_transport() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn authenticator_tcp_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 8100).parse().unwrap(); - task::block_on(run_with_lowlatency_transport(&endpoint)); + run_with_lowlatency_transport(&endpoint).await; } #[cfg(feature = "transport_udp")] -#[test] -fn authenticator_udp() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn authenticator_udp() { let _ = env_logger::try_init(); - 
task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 8010).parse().unwrap(); - task::block_on(run_with_universal_transport(&endpoint)); + run_with_universal_transport(&endpoint).await; } #[cfg(feature = "transport_udp")] -#[test] -fn authenticator_udp_with_lowlatency_transport() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn authenticator_udp_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 8110).parse().unwrap(); - task::block_on(run_with_lowlatency_transport(&endpoint)); + run_with_lowlatency_transport(&endpoint).await; } #[cfg(feature = "transport_unixpipe")] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn authenticator_unixpipe() { +async fn authenticator_unixpipe() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = "unixpipe/authenticator_unixpipe_test".parse().unwrap(); - task::block_on(run_with_universal_transport(&endpoint)); + run_with_universal_transport(&endpoint).await; } #[cfg(feature = "transport_unixpipe")] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn authenticator_unixpipe_with_lowlatency_transport() { +async fn authenticator_unixpipe_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = "unixpipe/authenticator_unixpipe_with_lowlatency_transport" .parse() .unwrap(); - task::block_on(run_with_lowlatency_transport(&endpoint)); + run_with_lowlatency_transport(&endpoint).await; } #[cfg(feature = "transport_ws")] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn authenticator_ws() { +async fn authenticator_ws() { let _ = env_logger::try_init(); - task::block_on(async { - 
zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 8020).parse().unwrap(); - task::block_on(run_with_universal_transport(&endpoint)); + run_with_universal_transport(&endpoint).await; } #[cfg(feature = "transport_ws")] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn authenticator_ws_with_lowlatency_transport() { +async fn authenticator_ws_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 8120).parse().unwrap(); - task::block_on(run_with_lowlatency_transport(&endpoint)); + run_with_lowlatency_transport(&endpoint).await; } #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] -#[test] -fn authenticator_unix() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn authenticator_unix() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let f1 = "zenoh-test-unix-socket-10.sock"; let _ = std::fs::remove_file(f1); let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); - task::block_on(run_with_universal_transport(&endpoint)); + run_with_universal_transport(&endpoint).await; let _ = std::fs::remove_file(f1); let _ = std::fs::remove_file(format!("{f1}.lock")); } #[cfg(feature = "transport_tls")] -#[test] -fn authenticator_tls() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn authenticator_tls() { use zenoh_link::tls::config::*; let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // NOTE: this an auto-generated pair of certificate and key. 
// The target domain is localhost, so it has no real @@ -859,18 +813,15 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== ) .unwrap(); - task::block_on(run_with_universal_transport(&endpoint)); + run_with_universal_transport(&endpoint).await; } #[cfg(feature = "transport_quic")] -#[test] -fn authenticator_quic() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn authenticator_quic() { use zenoh_link::quic::config::*; let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real @@ -962,5 +913,5 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== ) .unwrap(); - task::block_on(run_with_universal_transport(&endpoint)); + run_with_universal_transport(&endpoint).await; } diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index 323c6f529e..7707da57de 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -13,7 +13,6 @@ // #[cfg(feature = "transport_compression")] mod tests { - use async_std::{prelude::FutureExt, task}; use std::fmt::Write as _; use std::{ any::Any, @@ -24,7 +23,7 @@ mod tests { }, time::Duration, }; - use zenoh_core::zasync_executor_init; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ core::{ @@ -53,12 +52,6 @@ mod tests { const MSG_SIZE_LOWLATENCY: [usize; 2] = [1_024, 65000]; const MSG_SIZE_NOFRAG: [usize; 1] = [1_024]; - macro_rules! 
ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; - } - // Transport Handler for the router struct SHRouter { count: Arc, @@ -252,7 +245,7 @@ mod tests { ztimeout!(async { while !router_manager.get_transports_unicast().await.is_empty() { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -263,19 +256,19 @@ mod tests { } ztimeout!(async { - while !router_manager.get_listeners().is_empty() { - task::sleep(SLEEP).await; + while !router_manager.get_listeners().await.is_empty() { + tokio::time::sleep(SLEEP).await; } }); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; ztimeout!(router_manager.close()); ztimeout!(client_manager.close()); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } async fn test_transport( @@ -319,21 +312,21 @@ mod tests { Reliability::Reliable => { ztimeout!(async { while router_handler.get_count() != MSG_COUNT { - task::sleep(SLEEP_COUNT).await; + tokio::time::sleep(SLEEP_COUNT).await; } }); } Reliability::BestEffort => { ztimeout!(async { while router_handler.get_count() == 0 { - task::sleep(SLEEP_COUNT).await; + tokio::time::sleep(SLEEP_COUNT).await; } }); } }; // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } async fn run_single( @@ -427,12 +420,9 @@ mod tests { } #[cfg(feature = "transport_tcp")] - #[test] - fn transport_unicast_compression_tcp_only() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn transport_unicast_compression_tcp_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locators let endpoints: Vec = vec![ @@ -451,21 +441,13 @@ mod tests { }, ]; // Run - task::block_on(run_with_universal_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_ALL, - )); + run_with_universal_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_ALL).await; } #[cfg(feature = "transport_tcp")] - #[test] - fn 
transport_unicast_compression_tcp_only_with_lowlatency_transport() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn transport_unicast_compression_tcp_only_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locators let endpoints: Vec = vec![format!("tcp/127.0.0.1:{}", 19100).parse().unwrap()]; @@ -481,21 +463,13 @@ mod tests { }, ]; // Run - task::block_on(run_with_lowlatency_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_LOWLATENCY, - )); + run_with_lowlatency_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_LOWLATENCY).await; } #[cfg(feature = "transport_udp")] - #[test] - fn transport_unicast_compression_udp_only() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn transport_unicast_compression_udp_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locator let endpoints: Vec = vec![ @@ -514,21 +488,13 @@ mod tests { }, ]; // Run - task::block_on(run_with_universal_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_NOFRAG, - )); + run_with_universal_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_NOFRAG).await; } #[cfg(feature = "transport_udp")] - #[test] - fn transport_unicast_compression_udp_only_with_lowlatency_transport() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn transport_unicast_compression_udp_only_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locator let endpoints: Vec = vec![format!("udp/127.0.0.1:{}", 19110).parse().unwrap()]; @@ -544,11 +510,6 @@ mod tests { }, ]; // Run - task::block_on(run_with_lowlatency_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_NOFRAG, - )); + run_with_lowlatency_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_NOFRAG).await; } } diff --git 
a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index d13f763b68..ae17ae3f99 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs +++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -10,15 +10,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::prelude::FutureExt; -use async_std::sync::Barrier; -use async_std::task; use std::any::Any; use std::convert::TryFrom; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; -use zenoh_core::zasync_executor_init; +use tokio::sync::Barrier; +use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, @@ -42,12 +40,6 @@ const MSG_SIZE: usize = 1_024; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_millis(100); -macro_rules! ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; -} - // Transport Handler for the router struct SHPeer { count: Arc, @@ -146,14 +138,14 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec Adding endpoint {e:?}: {res:?}"); assert!(res.is_ok()); } - let locs = peer01_manager.get_listeners(); + let locs = peer01_manager.get_listeners().await; println!("[Transport Peer 01b] => Getting endpoints: {c_end01:?} {locs:?}"); assert_eq!(c_end01.len(), locs.len()); @@ -163,7 +155,7 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec Waiting for opening transport"); // Syncrhonize before opening the transports ztimeout!(cc_barow.wait()); @@ -224,7 +216,7 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec, endpoint02: Vec Adding endpoint {e:?}: {res:?}"); assert!(res.is_ok()); } - let locs = peer02_manager.get_listeners(); + let locs = peer02_manager.get_listeners().await; println!("[Transport Peer 02b] => Getting endpoints: {c_end02:?} {locs:?}"); assert_eq!(c_end02.len(), locs.len()); @@ -264,7 +256,7 @@ async fn 
transport_concurrent(endpoint01: Vec, endpoint02: Vec Waiting for opening transport"); // Syncrhonize before opening the transports ztimeout!(cc_barow.wait()); @@ -329,7 +321,7 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec, endpoint02: Vec Starting..."); - peer01_task.join(peer02_task).await; + let _ = tokio::join!(peer01_task, peer02_task); println!("[Transport Current 02] => ...Stopped"); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } #[cfg(feature = "transport_tcp")] -#[test] -fn transport_tcp_concurrent() { +#[tokio::test] +async fn transport_tcp_concurrent() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let endpoint01: Vec = vec![ format!("tcp/127.0.0.1:{}", 9000).parse().unwrap(), @@ -382,19 +371,14 @@ fn transport_tcp_concurrent() { format!("tcp/127.0.0.1:{}", 9017).parse().unwrap(), ]; - task::block_on(async { - transport_concurrent(endpoint01, endpoint02).await; - }); + transport_concurrent(endpoint01, endpoint02).await; } #[cfg(feature = "transport_ws")] -#[test] +#[tokio::test] #[ignore] -fn transport_ws_concurrent() { +async fn transport_ws_concurrent() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let endpoint01: Vec = vec![ format!("ws/127.0.0.1:{}", 9020).parse().unwrap(), @@ -417,19 +401,14 @@ fn transport_ws_concurrent() { format!("ws/127.0.0.1:{}", 9037).parse().unwrap(), ]; - task::block_on(async { - transport_concurrent(endpoint01, endpoint02).await; - }); + transport_concurrent(endpoint01, endpoint02).await; } #[cfg(feature = "transport_unixpipe")] -#[test] +#[tokio::test] #[ignore] -fn transport_unixpipe_concurrent() { +async fn transport_unixpipe_concurrent() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let endpoint01: Vec = vec![ "unixpipe/transport_unixpipe_concurrent".parse().unwrap(), @@ -452,7 +431,5 @@ fn transport_unixpipe_concurrent() { 
"unixpipe/transport_unixpipe_concurrent16".parse().unwrap(), ]; - task::block_on(async { - transport_concurrent(endpoint01, endpoint02).await; - }); + transport_concurrent(endpoint01, endpoint02).await; } diff --git a/io/zenoh-transport/tests/unicast_defragmentation.rs b/io/zenoh-transport/tests/unicast_defragmentation.rs index 8410188c2e..4b09bac0f4 100644 --- a/io/zenoh-transport/tests/unicast_defragmentation.rs +++ b/io/zenoh-transport/tests/unicast_defragmentation.rs @@ -11,9 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::{prelude::FutureExt, task}; use std::{convert::TryFrom, sync::Arc, time::Duration}; -use zenoh_core::zasync_executor_init; +use zenoh_core::ztimeout; use zenoh_protocol::{ core::{ Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, ZenohId, @@ -35,12 +34,6 @@ const SLEEP: Duration = Duration::from_secs(1); const MSG_SIZE: usize = 131_072; const MSG_DEFRAG_BUF: usize = 128_000; -macro_rules! ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; -} - async fn run(endpoint: &EndPoint, channel: Channel, msg_size: usize) { // Define client and router IDs let client_id = ZenohId::try_from([1]).unwrap(); @@ -104,14 +97,14 @@ async fn run(endpoint: &EndPoint, channel: Channel, msg_size: usize) { // Wait that the client transport has been closed ztimeout!(async { while client_transport.get_zid().is_ok() { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); // Wait on the router manager that the transport has been closed ztimeout!(async { while !router_manager.get_transports_unicast().await.is_empty() { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -121,27 +114,24 @@ async fn run(endpoint: &EndPoint, channel: Channel, msg_size: usize) { // Wait a little bit ztimeout!(async { - while !router_manager.get_listeners().is_empty() { - task::sleep(SLEEP).await; + while !router_manager.get_listeners().await.is_empty() { + 
tokio::time::sleep(SLEEP).await; } }); - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; ztimeout!(router_manager.close()); ztimeout!(client_manager.close()); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } #[cfg(feature = "transport_tcp")] -#[test] -fn transport_unicast_defragmentation_tcp_only() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_defragmentation_tcp_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locators let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 11000).parse().unwrap(); @@ -165,21 +155,16 @@ fn transport_unicast_defragmentation_tcp_only() { }, ]; // Run - task::block_on(async { - for ch in channel.iter() { - run(&endpoint, *ch, MSG_SIZE).await; - } - }); + for ch in channel.iter() { + run(&endpoint, *ch, MSG_SIZE).await; + } } #[cfg(feature = "transport_ws")] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn transport_unicast_defragmentation_ws_only() { +async fn transport_unicast_defragmentation_ws_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locators let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 11010).parse().unwrap(); @@ -203,21 +188,16 @@ fn transport_unicast_defragmentation_ws_only() { }, ]; // Run - task::block_on(async { - for ch in channel.iter() { - run(&endpoint, *ch, MSG_SIZE).await; - } - }); + for ch in channel.iter() { + run(&endpoint, *ch, MSG_SIZE).await; + } } #[cfg(feature = "transport_unixpipe")] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn transport_unicast_defragmentation_unixpipe_only() { +async fn transport_unicast_defragmentation_unixpipe_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locators let endpoint: EndPoint = 
"unixpipe/transport_unicast_defragmentation_unixpipe_only" @@ -243,9 +223,7 @@ fn transport_unicast_defragmentation_unixpipe_only() { }, ]; // Run - task::block_on(async { - for ch in channel.iter() { - run(&endpoint, *ch, MSG_SIZE).await; - } - }); + for ch in channel.iter() { + run(&endpoint, *ch, MSG_SIZE).await; + } } diff --git a/io/zenoh-transport/tests/unicast_intermittent.rs b/io/zenoh-transport/tests/unicast_intermittent.rs index d1aad4ef38..04711e66ec 100644 --- a/io/zenoh-transport/tests/unicast_intermittent.rs +++ b/io/zenoh-transport/tests/unicast_intermittent.rs @@ -11,15 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::prelude::FutureExt; -use async_std::task; use std::any::Any; use std::convert::TryFrom; use std::io::Write; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; -use zenoh_core::zasync_executor_init; +use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, @@ -46,11 +44,6 @@ const TIMEOUT: Duration = Duration::from_secs(300); const SLEEP: Duration = Duration::from_millis(100); const USLEEP: Duration = Duration::from_millis(1); -macro_rules! 
ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; -} #[cfg(test)] #[derive(Default)] struct SHRouterIntermittent; @@ -227,7 +220,7 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) // Add a listener to the router println!("\nTransport Intermittent [1a1]"); let _ = ztimeout!(router_manager.add_listener(endpoint.clone())).unwrap(); - let locators = router_manager.get_listeners(); + let locators = router_manager.get_listeners().await; println!("Transport Intermittent [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); @@ -243,7 +236,7 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) let c_client02_manager = client02_manager.clone(); let c_endpoint = endpoint.clone(); let c_router_id = router_id; - let c2_handle = task::spawn(async move { + let c2_handle = tokio::task::spawn(async move { loop { print!("+"); std::io::stdout().flush().unwrap(); @@ -254,21 +247,21 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) assert_eq!(c_client02_manager.get_transports_unicast().await.len(), 1); assert_eq!(c_ses2.get_zid().unwrap(), c_router_id); - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; print!("-"); std::io::stdout().flush().unwrap(); ztimeout!(c_ses2.close()).unwrap(); - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); let c_client03_manager = client03_manager.clone(); let c_endpoint = endpoint.clone(); let c_router_id = router_id; - let c3_handle = task::spawn(async move { + let c3_handle = tokio::task::spawn(async move { loop { print!("*"); std::io::stdout().flush().unwrap(); @@ -279,21 +272,22 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) assert_eq!(c_client03_manager.get_transports_unicast().await.len(), 1); assert_eq!(c_ses3.get_zid().unwrap(), c_router_id); - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; print!(""); std::io::stdout().flush().unwrap(); 
ztimeout!(c_ses3.close()).unwrap(); - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); /* [4] */ println!("Transport Intermittent [4a1]"); let c_router_manager = router_manager.clone(); - ztimeout!(task::spawn_blocking(move || task::block_on(async { + let rt = tokio::runtime::Handle::current(); + let _ = ztimeout!(tokio::task::spawn_blocking(move || rt.block_on(async { // Create the message to send let message: NetworkMessage = Push { wire_expr: "test".into(), @@ -342,14 +336,14 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) count += 1; } else { print!("O"); - task::sleep(USLEEP).await; + tokio::time::sleep(USLEEP).await; } } }))); // Stop the tasks - ztimeout!(c2_handle.cancel()); - ztimeout!(c3_handle.cancel()); + c2_handle.abort(); + c3_handle.abort(); // Check that client01 received all the messages println!("Transport Intermittent [4b1]"); @@ -360,7 +354,7 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) break; } println!("Transport Intermittent [4b2]: Received {c}/{MSG_COUNT}"); - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -388,7 +382,7 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) if transports.is_empty() { break; } - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -398,7 +392,7 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) ztimeout!(router_manager.del_listener(endpoint)).unwrap(); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; ztimeout!(router_manager.close()); ztimeout!(client01_manager.close()); @@ -406,7 +400,7 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) ztimeout!(client03_manager.close()); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } async fn universal_transport_intermittent(endpoint: &EndPoint) { @@ -418,79 +412,55 @@ async 
fn lowlatency_transport_intermittent(endpoint: &EndPoint) { } #[cfg(feature = "transport_tcp")] -#[test] -fn transport_tcp_intermittent() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_tcp_intermittent() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 12000).parse().unwrap(); - task::block_on(universal_transport_intermittent(&endpoint)); + universal_transport_intermittent(&endpoint).await; } #[cfg(feature = "transport_tcp")] -#[test] -fn transport_tcp_intermittent_for_lowlatency_transport() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_tcp_intermittent_for_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 12100).parse().unwrap(); - task::block_on(lowlatency_transport_intermittent(&endpoint)); + lowlatency_transport_intermittent(&endpoint).await; } #[cfg(feature = "transport_ws")] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn transport_ws_intermittent() { +async fn transport_ws_intermittent() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 12010).parse().unwrap(); - task::block_on(universal_transport_intermittent(&endpoint)); + universal_transport_intermittent(&endpoint).await; } #[cfg(feature = "transport_ws")] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn transport_ws_intermittent_for_lowlatency_transport() { +async fn transport_ws_intermittent_for_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 12110).parse().unwrap(); - task::block_on(lowlatency_transport_intermittent(&endpoint)); + 
lowlatency_transport_intermittent(&endpoint).await; } #[cfg(feature = "transport_unixpipe")] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn transport_unixpipe_intermittent() { +async fn transport_unixpipe_intermittent() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = "unixpipe/transport_unixpipe_intermittent".parse().unwrap(); - task::block_on(universal_transport_intermittent(&endpoint)); + universal_transport_intermittent(&endpoint).await; } #[cfg(feature = "transport_unixpipe")] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn transport_unixpipe_intermittent_for_lowlatency_transport() { +async fn transport_unixpipe_intermittent_for_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = "unixpipe/transport_unixpipe_intermittent_for_lowlatency_transport" .parse() .unwrap(); - task::block_on(lowlatency_transport_intermittent(&endpoint)); + lowlatency_transport_intermittent(&endpoint).await; } diff --git a/io/zenoh-transport/tests/unicast_multilink.rs b/io/zenoh-transport/tests/unicast_multilink.rs index cd8a48565a..2fe73853b9 100644 --- a/io/zenoh-transport/tests/unicast_multilink.rs +++ b/io/zenoh-transport/tests/unicast_multilink.rs @@ -13,9 +13,8 @@ // #[cfg(feature = "transport_multilink")] mod tests { - use async_std::{prelude::FutureExt, task}; use std::{convert::TryFrom, sync::Arc, time::Duration}; - use zenoh_core::zasync_executor_init; + use zenoh_core::ztimeout; use zenoh_link::EndPoint; use zenoh_protocol::core::{WhatAmI, ZenohId}; use zenoh_result::ZResult; @@ -28,12 +27,6 @@ mod tests { const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_millis(100); - macro_rules! 
ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; - } - #[cfg(test)] #[derive(Default)] struct SHRouterOpenClose; @@ -141,7 +134,7 @@ mod tests { println!("Transport Open Close [1a1]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [1a2]"); - let locators = router_manager.get_listeners(); + let locators = router_manager.get_listeners().await; println!("Transport Open Close [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); @@ -179,7 +172,7 @@ mod tests { assert_eq!(links.len(), links_num); break; } - None => task::sleep(SLEEP).await, + None => tokio::time::sleep(SLEEP).await, } } }); @@ -219,7 +212,7 @@ mod tests { if links.len() == links_num { break; } - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -243,7 +236,7 @@ mod tests { // Verify that the transport has not been open on the router println!("Transport Open Close [3d1]"); ztimeout!(async { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; let transports = router_manager.get_transports_unicast().await; assert_eq!(transports.len(), 1); let s = transports @@ -276,7 +269,7 @@ mod tests { if index.is_none() { break; } - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -303,7 +296,7 @@ mod tests { // Verify that the transport has been open on the router println!("Transport Open Close [5d1]"); ztimeout!(async { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; let transports = router_manager.get_transports_unicast().await; assert_eq!(transports.len(), 1); let s = transports @@ -346,7 +339,7 @@ mod tests { // Verify that the transport has been open on the router println!("Transport Open Close [6f1]"); ztimeout!(async { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; let transports = router_manager.get_transports_unicast().await; assert_eq!(transports.len(), 2); let s = transports @@ -392,7 +385,7 @@ mod tests { if transports.is_empty() { break; } - task::sleep(SLEEP).await; + 
tokio::time::sleep(SLEEP).await; } }); @@ -429,7 +422,7 @@ mod tests { assert_eq!(links.len(), links_num); break; } - None => task::sleep(SLEEP).await, + None => tokio::time::sleep(SLEEP).await, } } }); @@ -453,7 +446,7 @@ mod tests { if transports.is_empty() { break; } - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -465,98 +458,80 @@ mod tests { assert!(res.is_ok()); ztimeout!(async { - while !router_manager.get_listeners().is_empty() { - task::sleep(SLEEP).await; + while !router_manager.get_listeners().await.is_empty() { + tokio::time::sleep(SLEEP).await; } }); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; ztimeout!(router_manager.close()); ztimeout!(client01_manager.close()); ztimeout!(client02_manager.close()); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } #[cfg(feature = "transport_tcp")] - #[test] - fn multilink_tcp_only() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn multilink_tcp_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 18000).parse().unwrap(); - task::block_on(multilink_transport(&endpoint)); + multilink_transport(&endpoint).await; } #[cfg(feature = "transport_udp")] - #[test] - fn multilink_udp_only() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn multilink_udp_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 18010).parse().unwrap(); - task::block_on(multilink_transport(&endpoint)); + multilink_transport(&endpoint).await; } #[cfg(feature = "transport_ws")] - #[test] + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] - fn multilink_ws_only() { + async fn multilink_ws_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let 
endpoint: EndPoint = format!("ws/127.0.0.1:{}", 18020).parse().unwrap(); - task::block_on(multilink_transport(&endpoint)); + multilink_transport(&endpoint).await; } #[cfg(feature = "transport_unixpipe")] - #[test] + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] - fn multilink_unixpipe_only() { + async fn multilink_unixpipe_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let endpoint: EndPoint = "unixpipe/multilink_unixpipe_only".parse().unwrap(); - task::block_on(multilink_transport(&endpoint)); + multilink_transport(&endpoint).await; } #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] - #[test] + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] - fn multilink_unix_only() { + async fn multilink_unix_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let f1 = "zenoh-test-unix-socket-9.sock"; let _ = std::fs::remove_file(f1); let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); - task::block_on(multilink_transport(&endpoint)); + multilink_transport(&endpoint).await; let _ = std::fs::remove_file(f1); let _ = std::fs::remove_file(format!("{f1}.lock")); } #[cfg(feature = "transport_tls")] - #[test] - fn multilink_tls_only() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn multilink_tls_only() { use zenoh_link::tls::config::*; let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // NOTE: this an auto-generated pair of certificate and key. 
// The target domain is localhost, so it has no real @@ -647,18 +622,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== ) .unwrap(); - task::block_on(multilink_transport(&endpoint)); + multilink_transport(&endpoint).await; } #[cfg(feature = "transport_quic")] - #[test] - fn multilink_quic_only() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn multilink_quic_only() { use zenoh_link::quic::config::*; - task::block_on(async { - zasync_executor_init!(); - }); - // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real // mapping to any existing domain. The certificate and key @@ -749,6 +720,6 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== ) .unwrap(); - task::block_on(multilink_transport(&endpoint)); + multilink_transport(&endpoint).await; } } diff --git a/io/zenoh-transport/tests/unicast_openclose.rs b/io/zenoh-transport/tests/unicast_openclose.rs index 56b686947a..dfa690c889 100644 --- a/io/zenoh-transport/tests/unicast_openclose.rs +++ b/io/zenoh-transport/tests/unicast_openclose.rs @@ -11,9 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::{prelude::FutureExt, task}; use std::{convert::TryFrom, sync::Arc, time::Duration}; -use zenoh_core::zasync_executor_init; +use zenoh_core::ztimeout; use zenoh_link::EndPoint; use zenoh_protocol::core::{WhatAmI, ZenohId}; use zenoh_result::ZResult; @@ -31,15 +30,9 @@ const TIMEOUT: Duration = Duration::from_secs(60); const TIMEOUT_EXPECTED: Duration = Duration::from_secs(5); const SLEEP: Duration = Duration::from_millis(100); -macro_rules! ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; -} - macro_rules! 
ztimeout_expected { ($f:expr) => { - $f.timeout(TIMEOUT_EXPECTED).await.unwrap() + tokio::time::timeout(TIMEOUT_EXPECTED, $f).await.unwrap() }; } @@ -158,7 +151,7 @@ async fn openclose_transport( println!("Transport Open Close [1a1]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [1a2]"); - let locators = router_manager.get_listeners(); + let locators = router_manager.get_listeners().await; println!("Transport Open Close [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); @@ -169,7 +162,7 @@ async fn openclose_transport( println!("Transport Open Close [1c1]"); let open_res = ztimeout_expected!(client01_manager.open_transport_unicast(connect_endpoint.clone())); - println!("Transport Open Close [1c2]: {res:?}"); + println!("Transport Open Close [1c2]: {open_res:?}"); assert!(open_res.is_ok()); let c_ses1 = open_res.unwrap(); println!("Transport Open Close [1d1]"); @@ -197,7 +190,7 @@ async fn openclose_transport( assert_eq!(links.len(), links_num); break; } - None => task::sleep(SLEEP).await, + None => tokio::time::sleep(SLEEP).await, } } }); @@ -239,7 +232,7 @@ async fn openclose_transport( if links.len() == links_num { break; } - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); } else { @@ -266,7 +259,7 @@ async fn openclose_transport( // Verify that the transport has not been open on the router println!("Transport Open Close [3d1]"); ztimeout!(async { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; let transports = ztimeout!(router_manager.get_transports_unicast()); assert_eq!(transports.len(), 1); let s = transports @@ -299,7 +292,7 @@ async fn openclose_transport( if index.is_none() { break; } - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -326,7 +319,7 @@ async fn openclose_transport( // Verify that the transport has been open on the router println!("Transport Open Close [5d1]"); ztimeout!(async { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; let transports = 
ztimeout!(router_manager.get_transports_unicast()); assert_eq!(transports.len(), 1); let s = transports @@ -352,7 +345,7 @@ async fn openclose_transport( // Verify that the transport has not been open on the router println!("Transport Open Close [6c1]"); ztimeout!(async { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; let transports = ztimeout!(router_manager.get_transports_unicast()); assert_eq!(transports.len(), 1); let s = transports @@ -382,7 +375,7 @@ async fn openclose_transport( if transports.is_empty() { break; } - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -419,7 +412,7 @@ async fn openclose_transport( assert_eq!(links.len(), links_num); break; } - None => task::sleep(SLEEP).await, + None => tokio::time::sleep(SLEEP).await, } } }); @@ -443,7 +436,7 @@ async fn openclose_transport( if transports.is_empty() { break; } - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -455,20 +448,20 @@ async fn openclose_transport( assert!(res.is_ok()); ztimeout!(async { - while !router_manager.get_listeners().is_empty() { - task::sleep(SLEEP).await; + while !router_manager.get_listeners().await.is_empty() { + tokio::time::sleep(SLEEP).await; } }); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; ztimeout!(router_manager.close()); ztimeout!(client01_manager.close()); ztimeout!(client02_manager.close()); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } async fn openclose_universal_transport(endpoint: &EndPoint) { @@ -480,134 +473,94 @@ async fn openclose_lowlatency_transport(endpoint: &EndPoint) { } #[cfg(feature = "transport_tcp")] -#[test] -fn openclose_tcp_only() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn openclose_tcp_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13000).parse().unwrap(); - 
task::block_on(openclose_universal_transport(&endpoint)); + openclose_universal_transport(&endpoint).await; } #[cfg(feature = "transport_tcp")] -#[test] -fn openclose_tcp_only_with_lowlatency_transport() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn openclose_tcp_only_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13100).parse().unwrap(); - task::block_on(openclose_lowlatency_transport(&endpoint)); + openclose_lowlatency_transport(&endpoint).await; } #[cfg(feature = "transport_udp")] -#[test] -fn openclose_udp_only() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn openclose_udp_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13010).parse().unwrap(); - task::block_on(openclose_universal_transport(&endpoint)); + openclose_universal_transport(&endpoint).await; } #[cfg(feature = "transport_udp")] -#[test] -fn openclose_udp_only_with_lowlatency_transport() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn openclose_udp_only_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13110).parse().unwrap(); - task::block_on(openclose_lowlatency_transport(&endpoint)); + openclose_lowlatency_transport(&endpoint).await; } #[cfg(feature = "transport_ws")] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn openclose_ws_only() { +async fn openclose_ws_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13020).parse().unwrap(); - task::block_on(openclose_universal_transport(&endpoint)); + 
openclose_universal_transport(&endpoint).await; } #[cfg(feature = "transport_ws")] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn openclose_ws_only_with_lowlatency_transport() { +async fn openclose_ws_only_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13120).parse().unwrap(); - task::block_on(openclose_lowlatency_transport(&endpoint)); + openclose_lowlatency_transport(&endpoint).await; } #[cfg(feature = "transport_unixpipe")] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn openclose_unixpipe_only() { +async fn openclose_unixpipe_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = "unixpipe/openclose_unixpipe_only".parse().unwrap(); - task::block_on(openclose_universal_transport(&endpoint)); + openclose_universal_transport(&endpoint).await; } #[cfg(feature = "transport_unixpipe")] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn openclose_unixpipe_only_with_lowlatency_transport() { +async fn openclose_unixpipe_only_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = "unixpipe/openclose_unixpipe_only_with_lowlatency_transport" .parse() .unwrap(); - task::block_on(openclose_lowlatency_transport(&endpoint)); + openclose_lowlatency_transport(&endpoint).await; } #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn openclose_unix_only() { +async fn openclose_unix_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let f1 = "zenoh-test-unix-socket-9.sock"; let _ = std::fs::remove_file(f1); let endpoint: EndPoint = 
format!("unixsock-stream/{f1}").parse().unwrap(); - task::block_on(openclose_universal_transport(&endpoint)); + openclose_universal_transport(&endpoint).await; let _ = std::fs::remove_file(f1); let _ = std::fs::remove_file(format!("{f1}.lock")); } #[cfg(feature = "transport_tls")] -#[test] -fn openclose_tls_only() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn openclose_tls_only() { use zenoh_link::tls::config::*; let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real // mapping to any existing domain. The certificate and key @@ -697,18 +650,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== ) .unwrap(); - task::block_on(openclose_universal_transport(&endpoint)); + openclose_universal_transport(&endpoint).await; } #[cfg(feature = "transport_quic")] -#[test] -fn openclose_quic_only() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn openclose_quic_only() { use zenoh_link::quic::config::*; - task::block_on(async { - zasync_executor_init!(); - }); - // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real // mapping to any existing domain. 
The certificate and key @@ -799,20 +748,17 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== ) .unwrap(); - task::block_on(openclose_universal_transport(&endpoint)); + openclose_universal_transport(&endpoint).await; } #[cfg(feature = "transport_tcp")] #[cfg(target_os = "linux")] -#[test] -#[should_panic(expected = "TimeoutError")] -fn openclose_tcp_only_connect_with_interface_restriction() { +#[should_panic(expected = "Elapsed")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn openclose_tcp_only_connect_with_interface_restriction() { let addrs = get_ipv4_ipaddrs(None); let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let listen_endpoint: EndPoint = format!("tcp/{}:{}", addrs[0], 13001).parse().unwrap(); @@ -821,24 +767,17 @@ fn openclose_tcp_only_connect_with_interface_restriction() { .unwrap(); // should not connect to local interface and external address - task::block_on(openclose_transport( - &listen_endpoint, - &connect_endpoint, - false, - )); + openclose_transport(&listen_endpoint, &connect_endpoint, false).await; } #[cfg(feature = "transport_tcp")] #[cfg(target_os = "linux")] -#[test] #[should_panic(expected = "assertion failed: open_res.is_ok()")] -fn openclose_tcp_only_listen_with_interface_restriction() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn openclose_tcp_only_listen_with_interface_restriction() { let addrs = get_ipv4_ipaddrs(None); let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let listen_endpoint: EndPoint = format!("tcp/{}:{}#iface=lo", addrs[0], 13002) .parse() @@ -847,24 +786,17 @@ fn openclose_tcp_only_listen_with_interface_restriction() { let connect_endpoint: EndPoint = format!("tcp/{}:{}", addrs[0], 13002).parse().unwrap(); // should not connect to local interface and external address - task::block_on(openclose_transport( - &listen_endpoint, - &connect_endpoint, - false, - )); + 
openclose_transport(&listen_endpoint, &connect_endpoint, false).await; } #[cfg(feature = "transport_udp")] #[cfg(target_os = "linux")] -#[test] -#[should_panic(expected = "TimeoutError")] -fn openclose_udp_only_connect_with_interface_restriction() { +#[should_panic(expected = "Elapsed")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn openclose_udp_only_connect_with_interface_restriction() { let addrs = get_ipv4_ipaddrs(None); let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let listen_endpoint: EndPoint = format!("udp/{}:{}", addrs[0], 13003).parse().unwrap(); @@ -873,25 +805,17 @@ fn openclose_udp_only_connect_with_interface_restriction() { .unwrap(); // should not connect to local interface and external address - task::block_on(openclose_transport( - &listen_endpoint, - &connect_endpoint, - false, - )); + openclose_transport(&listen_endpoint, &connect_endpoint, false).await; } #[cfg(feature = "transport_udp")] #[cfg(target_os = "linux")] -#[test] -#[should_panic(expected = "assertion failed: open_res.is_ok()")] -fn openclose_udp_onlyi_listen_with_interface_restriction() { +#[should_panic(expected = "Elapsed")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn openclose_udp_only_listen_with_interface_restriction() { let addrs = get_ipv4_ipaddrs(None); let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let listen_endpoint: EndPoint = format!("udp/{}:{}#iface=lo", addrs[0], 13004) .parse() .unwrap(); @@ -899,9 +823,5 @@ fn openclose_udp_onlyi_listen_with_interface_restriction() { let connect_endpoint: EndPoint = format!("udp/{}:{}", addrs[0], 13004).parse().unwrap(); // should not connect to local interface and external address - task::block_on(openclose_transport( - &listen_endpoint, - &connect_endpoint, - false, - )); + openclose_transport(&listen_endpoint, &connect_endpoint, false).await; } diff --git 
a/io/zenoh-transport/tests/unicast_priorities.rs b/io/zenoh-transport/tests/unicast_priorities.rs index a00e53643d..9c851a0510 100644 --- a/io/zenoh-transport/tests/unicast_priorities.rs +++ b/io/zenoh-transport/tests/unicast_priorities.rs @@ -11,15 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::prelude::FutureExt; -use async_std::task; use std::any::Any; use std::convert::TryFrom; use std::fmt::Write as _; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; -use zenoh_core::zasync_executor_init; +use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::network::NetworkBody; use zenoh_protocol::{ @@ -57,12 +55,6 @@ const PRIORITY_ALL: [Priority; 8] = [ Priority::Background, ]; -macro_rules! ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; -} - // Transport Handler for the router struct SHRouter { priority: Arc, @@ -265,7 +257,7 @@ async fn close_transport( ztimeout!(async { while !router_manager.get_transports_unicast().await.is_empty() { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -276,13 +268,13 @@ async fn close_transport( } // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; ztimeout!(router_manager.close()); ztimeout!(client_manager.close()); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } async fn single_run(router_handler: Arc, client_transport: TransportUnicast) { @@ -320,14 +312,14 @@ async fn single_run(router_handler: Arc, client_transport: TransportUn // Wait for the messages to arrive to the other side ztimeout!(async { while router_handler.get_count() != MSG_COUNT { - task::sleep(SLEEP_COUNT).await; + tokio::time::sleep(SLEEP_COUNT).await; } }); } } // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } async fn run(endpoints: &[EndPoint]) { @@ -338,44 +330,35 @@ async fn run(endpoints: &[EndPoint]) { } #[cfg(feature = 
"transport_tcp")] -#[test] -fn priorities_tcp_only() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn priorities_tcp_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locators let endpoints: Vec = vec![format!("tcp/127.0.0.1:{}", 10000).parse().unwrap()]; // Run - task::block_on(run(&endpoints)); + run(&endpoints).await; } #[cfg(feature = "transport_unixpipe")] -#[test] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] -fn conduits_unixpipe_only() { +async fn conduits_unixpipe_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locators let endpoints: Vec = vec!["unixpipe/conduits_unixpipe_only" .to_string() .parse() .unwrap()]; // Run - task::block_on(run(&endpoints)); + run(&endpoints).await; } #[cfg(feature = "transport_ws")] -#[test] -fn priorities_ws_only() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn priorities_ws_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locators let endpoints: Vec = vec![format!("ws/127.0.0.1:{}", 10010).parse().unwrap()]; // Run - task::block_on(run(&endpoints)); + run(&endpoints).await; } diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index f9180849af..6796f803ca 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -13,7 +13,6 @@ // #[cfg(feature = "shared-memory")] mod tests { - use async_std::{prelude::FutureExt, task}; use rand::{Rng, SeedableRng}; use std::{ any::Any, @@ -25,7 +24,7 @@ mod tests { time::Duration, }; use zenoh_buffers::buffer::SplitBuffer; - use zenoh_core::zasync_executor_init; + use zenoh_core::ztimeout; use zenoh_crypto::PseudoRng; use zenoh_link::Link; use zenoh_protocol::{ @@ -50,12 +49,6 @@ mod tests { const MSG_COUNT: usize = 1_000; const MSG_SIZE: usize 
= 1_024; - macro_rules! ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; - } - // Transport Handler for the router struct SHPeer { count: Arc, @@ -220,10 +213,7 @@ mod tests { // Create the listener on the peer println!("Transport SHM [1a]"); - let _ = ztimeout!(peer_shm01_manager - .add_listener(endpoint.clone()) - .timeout(TIMEOUT)) - .unwrap(); + let _ = ztimeout!(peer_shm01_manager.add_listener(endpoint.clone())).unwrap(); // Create a transport with the peer println!("Transport SHM [1b]"); @@ -261,7 +251,7 @@ mod tests { loop { match shm01.alloc(MSG_SIZE) { Ok(sbuf) => break sbuf, - Err(_) => task::sleep(USLEEP).await, + Err(_) => tokio::time::sleep(USLEEP).await, } } }); @@ -291,13 +281,13 @@ mod tests { } // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; // Wait for the messages to arrive to the other side println!("Transport SHM [3b]"); ztimeout!(async { while peer_shm02_handler.get_count() != MSG_COUNT { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -310,7 +300,7 @@ mod tests { loop { match shm01.alloc(MSG_SIZE) { Ok(sbuf) => break sbuf, - Err(_) => task::sleep(USLEEP).await, + Err(_) => tokio::time::sleep(USLEEP).await, } } }); @@ -339,18 +329,18 @@ mod tests { } // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; // Wait for the messages to arrive to the other side println!("Transport SHM [4b]"); ztimeout!(async { while peer_net01_handler.get_count() != MSG_COUNT { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; // Close the transports println!("Transport SHM [5a]"); @@ -361,7 +351,7 @@ mod tests { ztimeout!(async { while !peer_shm01_manager.get_transports_unicast().await.is_empty() { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -371,91 +361,67 @@ mod tests { // Wait a little bit ztimeout!(async { - while 
!peer_shm01_manager.get_listeners().is_empty() { - task::sleep(SLEEP).await; + while !peer_shm01_manager.get_listeners().await.is_empty() { + tokio::time::sleep(SLEEP).await; } }); - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; ztimeout!(peer_net01_manager.close()); ztimeout!(peer_shm01_manager.close()); ztimeout!(peer_shm02_manager.close()); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } #[cfg(feature = "transport_tcp")] - #[test] - fn transport_tcp_shm() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn transport_tcp_shm() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14000).parse().unwrap(); - task::block_on(run(&endpoint, false)); + run(&endpoint, false).await; } #[cfg(feature = "transport_tcp")] - #[test] - fn transport_tcp_shm_with_lowlatency_transport() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn transport_tcp_shm_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14001).parse().unwrap(); - task::block_on(run(&endpoint, true)); + run(&endpoint, true).await; } #[cfg(feature = "transport_ws")] - #[test] - fn transport_ws_shm() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn transport_ws_shm() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14010).parse().unwrap(); - task::block_on(run(&endpoint, false)); + run(&endpoint, false).await; } #[cfg(feature = "transport_ws")] - #[test] - fn transport_ws_shm_with_lowlatency_transport() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn transport_ws_shm_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - 
zasync_executor_init!(); - }); - let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14011).parse().unwrap(); - task::block_on(run(&endpoint, true)); + run(&endpoint, true).await; } #[cfg(feature = "transport_unixpipe")] - #[test] - fn transport_unixpipe_shm() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn transport_unixpipe_shm() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = "unixpipe/transport_unixpipe_shm".parse().unwrap(); - task::block_on(run(&endpoint, false)); + run(&endpoint, false).await; } #[cfg(feature = "transport_unixpipe")] - #[test] - fn transport_unixpipe_shm_with_lowlatency_transport() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn transport_unixpipe_shm_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint: EndPoint = "unixpipe/transport_unixpipe_shm_with_lowlatency_transport" .parse() .unwrap(); - task::block_on(run(&endpoint, true)); + run(&endpoint, true).await; } } diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index 19380eb49e..83c3d98dce 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -13,14 +13,12 @@ // #[cfg(target_family = "unix")] mod tests { - use async_std::prelude::FutureExt; - use async_std::task; use std::any::Any; use std::convert::TryFrom; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; - use zenoh_core::zasync_executor_init; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, @@ -42,12 +40,6 @@ mod tests { const MSG_COUNT: usize = 16; const MSG_SIZE: usize = 1_024; - macro_rules! 
ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; - } - // Transport Handler for the router struct SHPeer { zid: ZenohId, @@ -168,7 +160,7 @@ mod tests { println!("[Simultaneous 01a] => Adding endpoint {e:?}: {res:?}"); assert!(res.is_ok()); } - let locs = peer01_manager.get_listeners(); + let locs = peer01_manager.get_listeners().await; println!("[Simultaneous 01b] => Getting endpoints: {endpoint01:?} {locs:?}"); assert_eq!(endpoint01.len(), locs.len()); @@ -178,7 +170,7 @@ mod tests { println!("[Simultaneous 02a] => Adding endpoint {e:?}: {res:?}"); assert!(res.is_ok()); } - let locs = peer02_manager.get_listeners(); + let locs = peer02_manager.get_listeners().await; println!("[Simultaneous 02b] => Getting endpoints: {endpoint02:?} {locs:?}"); assert_eq!(endpoint02.len(), locs.len()); @@ -188,7 +180,7 @@ mod tests { // Peer01 let c_p01m = peer01_manager.clone(); - let peer01_task = task::spawn(async move { + let peer01_task = tokio::task::spawn(async move { // Open the transport with the second peer // These open should succeed for e in c_ep02.iter() { @@ -203,12 +195,12 @@ mod tests { assert!(res.is_err()); } - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; let tp02 = ztimeout!(async { let mut tp02 = None; while tp02.is_none() { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; println!( "[Simultaneous 01e] => Transports: {:?}", peer01_manager.get_transports_unicast().await @@ -224,7 +216,7 @@ mod tests { let expected = endpoint01.len() + c_ep02.len(); let mut tl02 = vec![]; while tl02.len() != expected { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; tl02 = tp02.get_links().unwrap(); println!("[Simultaneous 01f] => Links {}/{}", tl02.len(), expected); } @@ -234,7 +226,7 @@ mod tests { ztimeout!(async { let mut check = 0; while check != MSG_COUNT { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; check = peer_sh01.get_count(); println!("[Simultaneous 01g] => Received 
{check:?}/{MSG_COUNT:?}"); } @@ -243,7 +235,7 @@ mod tests { // Peer02 let c_p02m = peer02_manager.clone(); - let peer02_task = task::spawn(async move { + let peer02_task = tokio::task::spawn(async move { // Open the transport with the first peer // These open should succeed for e in c_ep01.iter() { @@ -259,12 +251,12 @@ mod tests { } // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; let tp01 = ztimeout!(async { let mut tp01 = None; while tp01.is_none() { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; println!( "[Simultaneous 02e] => Transports: {:?}", peer02_manager.get_transports_unicast().await @@ -279,7 +271,7 @@ mod tests { let expected = c_ep01.len() + endpoint02.len(); let mut tl01 = vec![]; while tl01.len() != expected { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; tl01 = tp01.get_links().unwrap(); println!("[Simultaneous 02f] => Links {}/{}", tl01.len(), expected); } @@ -289,7 +281,7 @@ mod tests { ztimeout!(async { let mut check = 0; while check != MSG_COUNT { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; check = peer_sh02.get_count(); println!("[Simultaneous 02g] => Received {check:?}/{MSG_COUNT:?}"); } @@ -297,21 +289,17 @@ mod tests { }); println!("[Simultaneous] => Waiting for peer01 and peer02 tasks..."); - peer01_task.join(peer02_task).await; + let _ = tokio::join!(peer01_task, peer02_task); println!("[Simultaneous] => Waiting for peer01 and peer02 tasks... 
DONE\n"); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } #[cfg(feature = "transport_tcp")] - #[test] - fn transport_tcp_simultaneous() { + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn transport_tcp_simultaneous() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint01: Vec = vec![ format!("tcp/127.0.0.1:{}", 15000).parse().unwrap(), format!("tcp/127.0.0.1:{}", 15001).parse().unwrap(), @@ -325,20 +313,14 @@ mod tests { format!("tcp/127.0.0.1:{}", 15013).parse().unwrap(), ]; - task::block_on(async { - transport_simultaneous(endpoint01, endpoint02).await; - }); + transport_simultaneous(endpoint01, endpoint02).await; } #[cfg(feature = "transport_unixpipe")] - #[test] + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] - fn transport_unixpipe_simultaneous() { + async fn transport_unixpipe_simultaneous() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - let endpoint01: Vec = vec![ "unixpipe/transport_unixpipe_simultaneous".parse().unwrap(), "unixpipe/transport_unixpipe_simultaneous2".parse().unwrap(), @@ -352,19 +334,14 @@ mod tests { "unixpipe/transport_unixpipe_simultaneous8".parse().unwrap(), ]; - task::block_on(async { - transport_simultaneous(endpoint01, endpoint02).await; - }); + transport_simultaneous(endpoint01, endpoint02).await; } #[cfg(feature = "transport_ws")] - #[test] + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] - fn transport_ws_simultaneous() { + async fn transport_ws_simultaneous() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let endpoint01: Vec = vec![ format!("ws/127.0.0.1:{}", 15020).parse().unwrap(), @@ -379,8 +356,6 @@ mod tests { format!("ws/127.0.0.1:{}", 15033).parse().unwrap(), ]; - task::block_on(async { - transport_simultaneous(endpoint01, endpoint02).await; - }); + 
transport_simultaneous(endpoint01, endpoint02).await; } } diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index 11839aef2a..38534a1a17 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::{prelude::FutureExt, task}; use std::fmt::Write as _; use std::{ any::Any, @@ -22,7 +21,7 @@ use std::{ }, time::Duration, }; -use zenoh_core::zasync_executor_init; +use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ core::{ @@ -225,12 +224,6 @@ const MSG_SIZE_ALL: [usize; 2] = [1_024, 131_072]; const MSG_SIZE_LOWLATENCY: [usize; 2] = [1_024, 65000]; const MSG_SIZE_NOFRAG: [usize; 1] = [1_024]; -macro_rules! ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; -} - // Transport Handler for the router struct SHRouter { count: Arc, @@ -423,7 +416,7 @@ async fn close_transport( ztimeout!(async { while !router_manager.get_transports_unicast().await.is_empty() { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } }); @@ -434,19 +427,19 @@ async fn close_transport( } ztimeout!(async { - while !router_manager.get_listeners().is_empty() { - task::sleep(SLEEP).await; + while !router_manager.get_listeners().await.is_empty() { + tokio::time::sleep(SLEEP).await; } }); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; ztimeout!(router_manager.close()); ztimeout!(client_manager.close()); // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } async fn test_transport( @@ -491,21 +484,21 @@ async fn test_transport( Reliability::Reliable => { ztimeout!(async { while router_handler.get_count() != MSG_COUNT { - task::sleep(SLEEP_COUNT).await; + tokio::time::sleep(SLEEP_COUNT).await; } }); } Reliability::BestEffort => { ztimeout!(async { while router_handler.get_count() == 0 { - 
task::sleep(SLEEP_COUNT).await; + tokio::time::sleep(SLEEP_COUNT).await; } }); } }; // Wait a little bit - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } async fn run_single( @@ -599,12 +592,9 @@ async fn run_with_lowlatency_transport( } #[cfg(feature = "transport_tcp")] -#[test] -fn transport_unicast_tcp_only() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_tcp_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locators let endpoints: Vec = vec![ @@ -623,21 +613,13 @@ fn transport_unicast_tcp_only() { }, ]; // Run - task::block_on(run_with_universal_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_ALL, - )); + run_with_universal_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_ALL).await; } #[cfg(feature = "transport_tcp")] -#[test] -fn transport_unicast_tcp_only_with_lowlatency_transport() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_tcp_only_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locators let endpoints: Vec = vec![format!("tcp/127.0.0.1:{}", 16100).parse().unwrap()]; @@ -653,21 +635,13 @@ fn transport_unicast_tcp_only_with_lowlatency_transport() { }, ]; // Run - task::block_on(run_with_lowlatency_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_LOWLATENCY, - )); + run_with_lowlatency_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_LOWLATENCY).await; } #[cfg(feature = "transport_udp")] -#[test] -fn transport_unicast_udp_only() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_udp_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locator let endpoints: Vec = vec![ @@ -686,21 +660,13 @@ fn transport_unicast_udp_only() { }, ]; // Run - 
task::block_on(run_with_universal_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_NOFRAG, - )); + run_with_universal_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_NOFRAG).await; } #[cfg(feature = "transport_udp")] -#[test] -fn transport_unicast_udp_only_with_lowlatency_transport() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_udp_only_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locator let endpoints: Vec = vec![format!("udp/127.0.0.1:{}", 16110).parse().unwrap()]; @@ -716,21 +682,13 @@ fn transport_unicast_udp_only_with_lowlatency_transport() { }, ]; // Run - task::block_on(run_with_lowlatency_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_NOFRAG, - )); + run_with_lowlatency_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_NOFRAG).await; } #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] -#[test] -fn transport_unicast_unix_only() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_unix_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let f1 = "zenoh-test-unix-socket-5.sock"; let _ = std::fs::remove_file(f1); @@ -748,23 +706,15 @@ fn transport_unicast_unix_only() { }, ]; // Run - task::block_on(run_with_universal_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_ALL, - )); + run_with_universal_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_ALL).await; let _ = std::fs::remove_file(f1); let _ = std::fs::remove_file(format!("{f1}.lock")); } #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] -#[test] -fn transport_unicast_unix_only_with_lowlatency_transport() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_unix_only_with_lowlatency_transport() { let _ = env_logger::try_init(); - 
task::block_on(async { - zasync_executor_init!(); - }); let f1 = "zenoh-test-unix-socket-5-lowlatency.sock"; let _ = std::fs::remove_file(f1); @@ -782,23 +732,15 @@ fn transport_unicast_unix_only_with_lowlatency_transport() { }, ]; // Run - task::block_on(run_with_lowlatency_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_LOWLATENCY, - )); + run_with_lowlatency_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_LOWLATENCY).await; let _ = std::fs::remove_file(f1); let _ = std::fs::remove_file(format!("{f1}.lock")); } #[cfg(feature = "transport_ws")] -#[test] -fn transport_unicast_ws_only() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_ws_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locators let endpoints: Vec = vec![ @@ -825,21 +767,13 @@ fn transport_unicast_ws_only() { }, ]; // Run - task::block_on(run_with_universal_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_ALL, - )); + run_with_universal_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_ALL).await; } #[cfg(feature = "transport_ws")] -#[test] -fn transport_unicast_ws_only_with_lowlatency_transport() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_ws_only_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locators let endpoints: Vec = vec![format!("ws/127.0.0.1:{}", 16120).parse().unwrap()]; @@ -863,21 +797,13 @@ fn transport_unicast_ws_only_with_lowlatency_transport() { }, ]; // Run - task::block_on(run_with_lowlatency_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_LOWLATENCY, - )); + run_with_lowlatency_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_LOWLATENCY).await; } #[cfg(feature = "transport_unixpipe")] -#[test] -fn transport_unicast_unixpipe_only() { +#[tokio::test(flavor = "multi_thread", worker_threads = 
4)] +async fn transport_unicast_unixpipe_only() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locator let endpoints: Vec = vec![ @@ -896,21 +822,13 @@ fn transport_unicast_unixpipe_only() { }, ]; // Run - task::block_on(run_with_universal_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_ALL, - )); + run_with_universal_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_ALL).await; } #[cfg(feature = "transport_unixpipe")] -#[test] -fn transport_unicast_unixpipe_only_with_lowlatency_transport() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_unixpipe_only_with_lowlatency_transport() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locator let endpoints: Vec = vec![ @@ -930,21 +848,13 @@ fn transport_unicast_unixpipe_only_with_lowlatency_transport() { }, ]; // Run - task::block_on(run_with_lowlatency_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_LOWLATENCY, - )); + run_with_lowlatency_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_LOWLATENCY).await; } #[cfg(all(feature = "transport_tcp", feature = "transport_udp"))] -#[test] -fn transport_unicast_tcp_udp() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_tcp_udp() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locator let endpoints: Vec = vec![ @@ -965,12 +875,7 @@ fn transport_unicast_tcp_udp() { }, ]; // Run - task::block_on(run_with_universal_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_NOFRAG, - )); + run_with_universal_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_NOFRAG).await; } #[cfg(all( @@ -978,12 +883,9 @@ fn transport_unicast_tcp_udp() { feature = "transport_unixsock-stream", target_family = "unix" ))] -#[test] -fn transport_unicast_tcp_unix() { +#[tokio::test(flavor = "multi_thread", 
worker_threads = 4)] +async fn transport_unicast_tcp_unix() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let f1 = "zenoh-test-unix-socket-6.sock"; let _ = std::fs::remove_file(f1); @@ -1005,12 +907,7 @@ fn transport_unicast_tcp_unix() { }, ]; // Run - task::block_on(run_with_universal_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_ALL, - )); + run_with_universal_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_ALL).await; let _ = std::fs::remove_file(f1); let _ = std::fs::remove_file(format!("{f1}.lock")); } @@ -1020,12 +917,9 @@ fn transport_unicast_tcp_unix() { feature = "transport_unixsock-stream", target_family = "unix" ))] -#[test] -fn transport_unicast_udp_unix() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_udp_unix() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let f1 = "zenoh-test-unix-socket-7.sock"; let _ = std::fs::remove_file(f1); @@ -1047,12 +941,7 @@ fn transport_unicast_udp_unix() { }, ]; // Run - task::block_on(run_with_universal_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_NOFRAG, - )); + run_with_universal_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_NOFRAG).await; let _ = std::fs::remove_file(f1); let _ = std::fs::remove_file(format!("{f1}.lock")); } @@ -1063,12 +952,9 @@ fn transport_unicast_udp_unix() { feature = "transport_unixsock-stream", target_family = "unix" ))] -#[test] -fn transport_unicast_tcp_udp_unix() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_tcp_udp_unix() { let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let f1 = "zenoh-test-unix-socket-8.sock"; let _ = std::fs::remove_file(f1); @@ -1092,25 +978,17 @@ fn transport_unicast_tcp_udp_unix() { }, ]; // Run - task::block_on(run_with_universal_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_NOFRAG, 
- )); + run_with_universal_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_NOFRAG).await; let _ = std::fs::remove_file(f1); let _ = std::fs::remove_file(format!("{f1}.lock")); } #[cfg(all(feature = "transport_tls", target_family = "unix"))] -#[test] -fn transport_unicast_tls_only_server() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_tls_only_server() { use zenoh_link::tls::config::*; let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locator let mut endpoint: EndPoint = format!("tls/localhost:{}", 16070).parse().unwrap(); @@ -1148,24 +1026,15 @@ fn transport_unicast_tls_only_server() { ]; // Run let endpoints = vec![endpoint]; - task::block_on(run_with_universal_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_ALL, - )); + run_with_universal_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_ALL).await; } #[cfg(feature = "transport_quic")] -#[test] -fn transport_unicast_quic_only_server() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_quic_only_server() { use zenoh_link::quic::config::*; let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); - // Define the locator let mut endpoint: EndPoint = format!("quic/localhost:{}", 16080).parse().unwrap(); endpoint @@ -1202,23 +1071,15 @@ fn transport_unicast_quic_only_server() { ]; // Run let endpoints = vec![endpoint]; - task::block_on(run_with_universal_transport( - &endpoints, - &endpoints, - &channel, - &MSG_SIZE_ALL, - )); + run_with_universal_transport(&endpoints, &endpoints, &channel, &MSG_SIZE_ALL).await; } #[cfg(all(feature = "transport_tls", target_family = "unix"))] -#[test] -fn transport_unicast_tls_only_mutual_success() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_tls_only_mutual_success() { use zenoh_link::tls::config::*; let _ = env_logger::try_init(); - 
task::block_on(async { - zasync_executor_init!(); - }); let client_auth = "true"; @@ -1275,24 +1136,22 @@ fn transport_unicast_tls_only_mutual_success() { // Run let client_endpoints = vec![client_endpoint]; let server_endpoints = vec![server_endpoint]; - task::block_on(run_with_universal_transport( + run_with_universal_transport( &client_endpoints, &server_endpoints, &channel, &MSG_SIZE_ALL, - )); + ) + .await; } #[cfg(all(feature = "transport_tls", target_family = "unix"))] -#[test] -fn transport_unicast_tls_only_mutual_no_client_certs_failure() { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { use std::vec; use zenoh_link::tls::config::*; let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); // Define the locator let mut client_endpoint: EndPoint = ("tls/localhost:10462").parse().unwrap(); @@ -1343,12 +1202,14 @@ fn transport_unicast_tls_only_mutual_no_client_certs_failure() { let client_endpoints = vec![client_endpoint]; let server_endpoints = vec![server_endpoint]; let result = std::panic::catch_unwind(|| { - task::block_on(run_with_universal_transport( - &client_endpoints, - &server_endpoints, - &channel, - &MSG_SIZE_ALL, - )) + tokio::runtime::Runtime::new() + .unwrap() + .block_on(run_with_universal_transport( + &client_endpoints, + &server_endpoints, + &channel, + &MSG_SIZE_ALL, + )) }); assert!(result.is_err()); } @@ -1359,9 +1220,6 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { use zenoh_link::tls::config::*; let _ = env_logger::try_init(); - task::block_on(async { - zasync_executor_init!(); - }); let client_auth = "true"; @@ -1423,12 +1281,14 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { let client_endpoints = vec![client_endpoint]; let server_endpoints = vec![server_endpoint]; let result = std::panic::catch_unwind(|| { - task::block_on(run_with_universal_transport( - &client_endpoints, 
- &server_endpoints, - &channel, - &MSG_SIZE_ALL, - )) + tokio::runtime::Runtime::new() + .unwrap() + .block_on(run_with_universal_transport( + &client_endpoints, + &server_endpoints, + &channel, + &MSG_SIZE_ALL, + )) }); assert!(result.is_err()); } diff --git a/plugins/zenoh-plugin-rest/Cargo.toml b/plugins/zenoh-plugin-rest/Cargo.toml index a797ab0c59..8459bb5172 100644 --- a/plugins/zenoh-plugin-rest/Cargo.toml +++ b/plugins/zenoh-plugin-rest/Cargo.toml @@ -33,7 +33,7 @@ crate-type = ["cdylib", "rlib"] [dependencies] anyhow = { workspace = true, features = ["default"] } -async-std = { workspace = true, features = ["default"] } +async-std = { workspace = true, features = ["default", "attributes"] } base64 = { workspace = true } const_format = { workspace = true } env_logger = { workspace = true } diff --git a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index 91b0283ddb..372eaf234a 100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -31,7 +31,7 @@ unstable = [] default = [] [dependencies] -async-std = { workspace = true, features = ["attributes", "unstable"] } +tokio = { workspace = true, features = ["rt", "sync", "time", "macros", "io-std"] } bincode = { workspace = true } env_logger = { workspace = true } flume = { workspace = true } @@ -44,6 +44,7 @@ zenoh-macros = { workspace = true } zenoh-result = { workspace = true } zenoh-sync = { workspace = true } zenoh-util = { workspace = true } +zenoh-runtime = { workspace = true } [dev-dependencies] clap = { workspace = true, features = ["derive"] } diff --git a/zenoh-ext/examples/z_member.rs b/zenoh-ext/examples/z_member.rs index 9099275aab..fb10ac4cd8 100644 --- a/zenoh-ext/examples/z_member.rs +++ b/zenoh-ext/examples/z_member.rs @@ -18,7 +18,7 @@ use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh_ext::group::*; -#[async_std::main] +#[tokio::main] async fn main() { env_logger::init(); let z = Arc::new(zenoh::open(Config::default()).res().await.unwrap()); diff --git 
a/zenoh-ext/examples/z_pub_cache.rs b/zenoh-ext/examples/z_pub_cache.rs index ecf3824ef4..e564ffb8f1 100644 --- a/zenoh-ext/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/z_pub_cache.rs @@ -11,14 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::task::sleep; use clap::{arg, Command}; use std::time::Duration; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; use zenoh_ext::*; -#[async_std::main] +#[tokio::main] async fn main() { // Initiate logging env_logger::init(); @@ -40,7 +39,7 @@ async fn main() { println!("Press CTRL-C to quit..."); for idx in 0..u32::MAX { - sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] {value}"); println!("Put Data ('{}': '{}')", &key_expr, buf); session.put(&key_expr, buf).res().await.unwrap(); diff --git a/zenoh-ext/examples/z_query_sub.rs b/zenoh-ext/examples/z_query_sub.rs index 7ecf866355..570d15ac15 100644 --- a/zenoh-ext/examples/z_query_sub.rs +++ b/zenoh-ext/examples/z_query_sub.rs @@ -18,7 +18,7 @@ use zenoh::prelude::r#async::*; use zenoh::query::ReplyKeyExpr; use zenoh_ext::*; -#[async_std::main] +#[tokio::main] async fn main() { // Initiate logging env_logger::init(); diff --git a/zenoh-ext/examples/z_view_size.rs b/zenoh-ext/examples/z_view_size.rs index 8304a26f31..64e7b3ea4c 100644 --- a/zenoh-ext/examples/z_view_size.rs +++ b/zenoh-ext/examples/z_view_size.rs @@ -18,7 +18,7 @@ use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh_ext::group::*; -#[async_std::main] +#[tokio::main] async fn main() { env_logger::init(); diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index aece581fde..3595ccad08 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -14,8 +14,6 @@ //! 
To manage groups and group memeberships -use async_std::sync::Mutex; -use async_std::task::JoinHandle; use flume::{Receiver, Sender}; use futures::prelude::*; use futures::select; @@ -25,6 +23,8 @@ use std::convert::TryInto; use std::ops::Add; use std::sync::Arc; use std::time::{Duration, Instant}; +use tokio::sync::Mutex; +use tokio::task::JoinHandle; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; @@ -169,9 +169,9 @@ pub struct Group { impl Drop for Group { fn drop(&mut self) { // cancel background tasks - async_std::task::block_on(async { + tokio::runtime::Handle::current().block_on(async { while let Some(handle) = self.tasks.pop() { - let _ = handle.cancel().await; + handle.abort(); } }); } @@ -186,7 +186,7 @@ async fn keep_alive_task(state: Arc) { .lease .mul_f32(state.local_member.refresh_ratio); loop { - async_std::task::sleep(period).await; + tokio::time::sleep(period).await; log::trace!("Sending Keep Alive for: {}", &state.local_member.mid); let _ = state.group_publisher.put(buf.clone()).res().await; } @@ -195,7 +195,7 @@ async fn keep_alive_task(state: Arc) { fn spawn_watchdog(s: Arc, period: Duration) -> JoinHandle<()> { let watch_dog = async move { loop { - async_std::task::sleep(period).await; + tokio::time::sleep(period).await; let now = Instant::now(); let mut ms = s.members.lock().await; let expired_members: Vec = ms @@ -221,7 +221,7 @@ fn spawn_watchdog(s: Arc, period: Duration) -> JoinHandle<()> { } } }; - async_std::task::spawn(watch_dog) + tokio::task::spawn(watch_dog) } async fn query_handler(z: Arc, state: Arc) { @@ -397,10 +397,10 @@ impl Group { // If the liveliness is manual it is the user who has to assert it. 
if is_auto_liveliness { - async_std::task::spawn(keep_alive_task(state.clone())); + tokio::task::spawn(keep_alive_task(state.clone())); } - let events_task = async_std::task::spawn(net_event_handler(z.clone(), state.clone())); - let queries_task = async_std::task::spawn(query_handler(z.clone(), state.clone())); + let events_task = tokio::task::spawn(net_event_handler(z.clone(), state.clone())); + let queries_task = tokio::task::spawn(query_handler(z.clone(), state.clone())); let watchdog_task = spawn_watchdog(state.clone(), Duration::from_secs(1)); Ok(Group { state, @@ -461,7 +461,7 @@ impl Group { }; let r: bool = select! { p = f.fuse() => p, - _ = async_std::task::sleep(timeout).fuse() => false, + _ = tokio::time::sleep(timeout).fuse() => false, }; r } diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index cd5ed964ad..c8c5679c91 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -11,10 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::channel::{bounded, Sender}; -use async_std::task; -use futures::select; -use futures::{FutureExt, StreamExt}; +use flume::{bounded, Sender}; use std::collections::{HashMap, VecDeque}; use std::convert::TryInto; use std::future::Ready; @@ -168,14 +165,15 @@ impl<'a> PublicationCache<'a> { let resources_limit = conf.resources_limit; let history = conf.history; - let (stoptx, mut stoprx) = bounded::(1); - task::spawn(async move { + // TODO(yuyuan): use CancellationToken to manage it + let (stoptx, stoprx) = bounded::(1); + zenoh_runtime::ZRuntime::TX.spawn(async move { let mut cache: HashMap> = HashMap::with_capacity(resources_limit.unwrap_or(32)); let limit = resources_limit.unwrap_or(usize::MAX); loop { - select!( + tokio::select! 
{ // on publication received by the local subscriber, store it sample = sub_recv.recv_async() => { if let Ok(sample) = sample { @@ -237,10 +235,8 @@ impl<'a> PublicationCache<'a> { }, // When stoptx is dropped, stop the task - _ = stoprx.next().fuse() => { - return - } - ); + _ = stoprx.recv_async() => return + } } }); diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 4a7c4f2ded..978d348da1 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -595,7 +595,8 @@ where /// /// # Examples /// ```no_run -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh_ext::*; /// @@ -615,7 +616,7 @@ where /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {:?}", sample); /// } -/// # }) +/// # } /// ``` pub struct FetchingSubscriber<'a, Receiver> { subscriber: Subscriber<'a, ()>, @@ -728,7 +729,8 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh_ext::*; /// @@ -758,7 +760,7 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn fetch< @@ -810,7 +812,8 @@ impl Drop for RepliesHandler { /// /// # Examples /// ```no_run -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// # use zenoh::prelude::r#async::*; /// # use zenoh_ext::*; /// # @@ -839,7 +842,7 @@ impl Drop for RepliesHandler { /// .res() /// .await /// .unwrap(); -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct FetchBuilder< diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index 
2a2c1df97b..73fbd7dfc4 100644 --- a/zenoh-ext/src/session_ext.rs +++ b/zenoh-ext/src/session_ext.rs @@ -57,7 +57,8 @@ impl<'a> SessionExt<'a, 'a> for Session { impl<'s> SessionExt<'s, 'static> for Arc { /// Examples: /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::config::ModeDependentValue::Unique; /// use zenoh_ext::SessionExt; @@ -66,10 +67,10 @@ impl<'s> SessionExt<'s, 'static> for Arc { /// config.timestamping.set_enabled(Some(Unique(true))); /// let session = zenoh::open(config).res().await.unwrap().into_arc(); /// let publication_cache = session.declare_publication_cache("key/expression").res().await.unwrap(); - /// async_std::task::spawn(async move { + /// tokio::task::spawn(async move { /// publication_cache.key_expr(); /// }).await; - /// # }) + /// # } /// ``` fn declare_publication_cache<'b, 'c, TryIntoKeyExpr>( &'s self, diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index a2987f8833..192a0a3121 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -47,7 +47,7 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// Create a [`FetchingSubscriber`](super::FetchingSubscriber). /// - /// This operation returns a [`FetchingSubscriberBuilder`](FetchingSubscriberBuilder) that can be used to finely configure the subscriber. + /// This operation returns a [`FetchingSubscriberBuilder`](FetchingSubscriberBuilder) that can be used to finely configure the subscriber. /// As soon as built (calling `.wait()` or `.await` on the `FetchingSubscriberBuilder`), the `FetchingSubscriber` /// will run the given `fetch` funtion. The user defined `fetch` funtion should fetch some samples and return them /// through the callback funtion. Those samples will be merged with the received publications and made available in the receiver. 
@@ -57,7 +57,8 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh_ext::*; /// @@ -77,7 +78,7 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {:?}", sample); /// } - /// # }) + /// # } /// ``` fn fetching< Fetch: FnOnce(Box) -> ZResult<()>, @@ -93,7 +94,7 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// Create a [`FetchingSubscriber`](super::FetchingSubscriber) that will perform a query (`session.get()`) as it's /// initial fetch. /// - /// This operation returns a [`QueryingSubscriberBuilder`](QueryingSubscriberBuilder) that can be used to finely configure the subscriber. + /// This operation returns a [`QueryingSubscriberBuilder`](QueryingSubscriberBuilder) that can be used to finely configure the subscriber. /// As soon as built (calling `.wait()` or `.await` on the `QueryingSubscriberBuilder`), the `FetchingSubscriber` /// will issue a query on a given key expression (by default it uses the same key expression than it subscribes to). /// The results of the query will be merged with the received publications and made available in the receiver. 
@@ -103,7 +104,8 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh_ext::*; /// @@ -117,7 +119,7 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {:?}", sample); /// } - /// # }) + /// # } /// ``` fn querying(self) -> QueryingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler>; } @@ -129,7 +131,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// Create a [`FetchingSubscriber`](super::FetchingSubscriber). /// - /// This operation returns a [`FetchingSubscriberBuilder`](FetchingSubscriberBuilder) that can be used to finely configure the subscriber. + /// This operation returns a [`FetchingSubscriberBuilder`](FetchingSubscriberBuilder) that can be used to finely configure the subscriber. /// As soon as built (calling `.wait()` or `.await` on the `FetchingSubscriberBuilder`), the `FetchingSubscriber` /// will run the given `fetch` funtion. The user defined `fetch` funtion should fetch some samples and return them /// through the callback funtion. Those samples will be merged with the received publications and made available in the receiver. 
@@ -139,7 +141,8 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh_ext::*; /// @@ -159,7 +162,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {:?}", sample); /// } - /// # }) + /// # } /// ``` fn fetching< Fetch: FnOnce(Box) -> ZResult<()>, @@ -187,7 +190,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// Create a [`FetchingSubscriber`](super::FetchingSubscriber) that will perform a query (`session.get()`) as it's /// initial fetch. /// - /// This operation returns a [`QueryingSubscriberBuilder`](QueryingSubscriberBuilder) that can be used to finely configure the subscriber. + /// This operation returns a [`QueryingSubscriberBuilder`](QueryingSubscriberBuilder) that can be used to finely configure the subscriber. /// As soon as built (calling `.wait()` or `.await` on the `QueryingSubscriberBuilder`), the `FetchingSubscriber` /// will issue a query on a given key expression (by default it uses the same key expression than it subscribes to). /// The results of the query will be merged with the received publications and made available in the receiver. 
@@ -197,7 +200,8 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh_ext::*; /// @@ -211,7 +215,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {:?}", sample); /// } - /// # }) + /// # } /// ``` fn querying(self) -> QueryingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler> { QueryingSubscriberBuilder { @@ -240,7 +244,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// Create a fetching liveliness subscriber ([`FetchingSubscriber`](super::FetchingSubscriber)). /// - /// This operation returns a [`FetchingSubscriberBuilder`](FetchingSubscriberBuilder) that can be used to finely configure the subscriber. + /// This operation returns a [`FetchingSubscriberBuilder`](FetchingSubscriberBuilder) that can be used to finely configure the subscriber. /// As soon as built (calling `.wait()` or `.await` on the `FetchingSubscriberBuilder`), the `FetchingSubscriber` /// will run the given `fetch` funtion. The user defined `fetch` funtion should fetch some samples and return them /// through the callback funtion. Those samples will be merged with the received publications and made available in the receiver. 
@@ -251,7 +255,8 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh_ext::*; /// @@ -273,7 +278,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {:?}", sample); /// } - /// # }) + /// # } /// ``` fn fetching< Fetch: FnOnce(Box) -> ZResult<()>, @@ -301,7 +306,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// Create a fetching liveliness subscriber ([`FetchingSubscriber`](super::FetchingSubscriber)) that will perform a /// liveliness query (`session.liveliness().get()`) as it's initial fetch. /// - /// This operation returns a [`QueryingSubscriberBuilder`](QueryingSubscriberBuilder) that can be used to finely configure the subscriber. + /// This operation returns a [`QueryingSubscriberBuilder`](QueryingSubscriberBuilder) that can be used to finely configure the subscriber. /// As soon as built (calling `.wait()` or `.await` on the `QueryingSubscriberBuilder`), the `FetchingSubscriber` /// will issue a liveliness query on a given key expression (by default it uses the same key expression than it subscribes to). /// The results of the query will be merged with the received publications and made available in the receiver. 
@@ -312,7 +317,8 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh_ext::*; /// @@ -327,7 +333,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {:?}", sample); /// } - /// # }) + /// # } /// ``` fn querying(self) -> QueryingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler> { QueryingSubscriberBuilder { diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 11ecfad1bf..0e28905253 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -63,8 +63,8 @@ default = [ ] [dependencies] -async-global-executor = { workspace = true } -async-std = { workspace = true, features = ["attributes"] } +tokio = { workspace = true, features = ["rt", "macros", "time"] } +tokio-util = { workspace = true } async-trait = { workspace = true } base64 = { workspace = true } const_format = { workspace = true } @@ -104,6 +104,7 @@ zenoh-shm = { workspace = true, optional = true } zenoh-sync = { workspace = true } zenoh-transport = { workspace = true } zenoh-util = { workspace = true } +zenoh-runtime = { workspace = true } [build-dependencies] rustc_version = { workspace = true } diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index 8cdf638af5..7fd972c9a6 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -18,7 +18,6 @@ use crate::{ sample::DataInfo, Sample, Session, ZResult, }; -use async_std::task; use std::{ collections::hash_map::DefaultHasher, hash::{Hash, Hasher}, @@ -92,12 +91,16 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { } if let Ok(own_zid) = keyexpr::new(&session.zid().to_string()) { - for transport in task::block_on(session.runtime.manager().get_transports_unicast()) { + for transport in zenoh_runtime::ZRuntime::Net + 
.block_in_place(session.runtime.manager().get_transports_unicast()) + { if let Ok(peer) = transport.get_peer() { reply_peer(own_zid, &query, peer); } } - for transport in task::block_on(session.runtime.manager().get_transports_multicast()) { + for transport in zenoh_runtime::ZRuntime::Net + .block_in_place(session.runtime.manager().get_transports_multicast()) + { for peer in transport.get_peers().unwrap_or_default() { reply_peer(own_zid, &query, peer); } diff --git a/zenoh/src/info.rs b/zenoh/src/info.rs index 5b9ef457ec..3e0efdf134 100644 --- a/zenoh/src/info.rs +++ b/zenoh/src/info.rs @@ -14,7 +14,6 @@ //! Tools to access information about the current zenoh [`Session`](crate::Session). use crate::SessionRef; -use async_std::task; use std::future::Ready; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::{WhatAmI, ZenohId}; @@ -24,12 +23,13 @@ use zenoh_protocol::core::{WhatAmI, ZenohId}; /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let zid = session.info().zid().res().await; -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] @@ -61,13 +61,14 @@ impl<'a> AsyncResolve for ZidBuilder<'a> { /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let mut routers_zid = session.info().routers_zid().res().await; /// while let Some(router_zid) = routers_zid.next() {} -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] @@ -82,7 +83,8 @@ impl<'a> 
Resolvable for RoutersZidBuilder<'a> { impl<'a> SyncResolve for RoutersZidBuilder<'a> { fn res_sync(self) -> Self::To { Box::new( - task::block_on(self.session.runtime.manager().get_transports_unicast()) + zenoh_runtime::ZRuntime::Application + .block_in_place(self.session.runtime.manager().get_transports_unicast()) .into_iter() .filter_map(|s| { s.get_whatami() @@ -107,14 +109,15 @@ impl<'a> AsyncResolve for RoutersZidBuilder<'a> { /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let zid = session.info().zid().res().await; /// let mut peers_zid = session.info().peers_zid().res().await; /// while let Some(peer_zid) = peers_zid.next() {} -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] @@ -129,7 +132,8 @@ impl<'a> Resolvable for PeersZidBuilder<'a> { impl<'a> SyncResolve for PeersZidBuilder<'a> { fn res_sync(self) -> ::To { Box::new( - task::block_on(self.session.runtime.manager().get_transports_unicast()) + zenoh_runtime::ZRuntime::Application + .block_in_place(self.session.runtime.manager().get_transports_unicast()) .into_iter() .filter_map(|s| { s.get_whatami() @@ -154,13 +158,14 @@ impl<'a> AsyncResolve for PeersZidBuilder<'a> { /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let info = session.info(); /// let zid = info.zid().res().await; -/// # }) +/// # } /// ``` pub struct SessionInfo<'a> { pub(crate) session: SessionRef<'a>, @@ -171,12 +176,13 @@ impl SessionInfo<'_> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { 
/// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let zid = session.info().zid().res().await; - /// # }) + /// # } /// ``` pub fn zid(&self) -> ZidBuilder<'_> { ZidBuilder { @@ -189,13 +195,14 @@ impl SessionInfo<'_> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let mut routers_zid = session.info().routers_zid().res().await; /// while let Some(router_zid) = routers_zid.next() {} - /// # }) + /// # } /// ``` pub fn routers_zid(&self) -> RoutersZidBuilder<'_> { RoutersZidBuilder { @@ -207,13 +214,14 @@ impl SessionInfo<'_> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let mut peers_zid = session.info().peers_zid().res().await; /// while let Some(peer_zid) = peers_zid.next() {} - /// # }) + /// # } /// ``` pub fn peers_zid(&self) -> PeersZidBuilder<'_> { PeersZidBuilder { diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index c563592119..628f07611a 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -606,13 +606,14 @@ impl<'a> Undeclarable<&'a Session, KeyExprUndeclaration<'a>> for KeyExpr<'a> { /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let key_expr = session.declare_keyexpr("key/expression").res().await.unwrap(); /// session.undeclare(key_expr).res().await.unwrap(); -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct 
KeyExprUndeclaration<'a> { diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 0a8f1feb64..3ee115e293 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -34,7 +34,7 @@ //! ``` //! use zenoh::prelude::r#async::*; //! -//! #[async_std::main] +//! #[tokio::main] //! async fn main() { //! let session = zenoh::open(config::default()).res().await.unwrap(); //! session.put("key/expression", "value").res().await.unwrap(); @@ -48,7 +48,7 @@ //! use futures::prelude::*; //! use zenoh::prelude::r#async::*; //! -//! #[async_std::main] +//! #[tokio::main] //! async fn main() { //! let session = zenoh::open(config::default()).res().await.unwrap(); //! let subscriber = session.declare_subscriber("key/expression").res().await.unwrap(); @@ -65,7 +65,7 @@ //! use futures::prelude::*; //! use zenoh::prelude::r#async::*; //! -//! #[async_std::main] +//! #[tokio::main] //! async fn main() { //! let session = zenoh::open(config::default()).res().await.unwrap(); //! let replies = session.get("key/expression").res().await.unwrap(); @@ -157,7 +157,7 @@ pub mod time { pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; - /// Generates a reception [`Timestamp`] with id=0x01. + /// Generates a reception [`Timestamp`] with id=0x01. /// This operation should be called if a timestamp is required for an incoming [`zenoh::Sample`](crate::Sample) /// that doesn't contain any timestamp. pub fn new_reception_timestamp() -> Timestamp { @@ -173,8 +173,8 @@ pub mod properties { use super::prelude::Value; pub use zenoh_collections::Properties; - /// Convert a set of [`Properties`] into a [`Value`]. - /// For instance, Properties: `[("k1", "v1"), ("k2, v2")]` + /// Convert a set of [`Properties`] into a [`Value`]. 
+ /// For instance, Properties: `[("k1", "v1"), ("k2, v2")]` /// is converted into Json: `{ "k1": "v1", "k2": "v2" }` pub fn properties_to_json_value(props: &Properties) -> Value { let json_map = props @@ -201,7 +201,8 @@ pub mod scouting; /// /// # Examples /// ```no_run -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::scouting::WhatAmI; /// @@ -212,7 +213,7 @@ pub mod scouting; /// while let Ok(hello) = receiver.recv_async().await { /// println!("{}", hello); /// } -/// # }) +/// # } /// ``` pub fn scout, TryIntoConfig>( what: I, @@ -238,15 +239,17 @@ where /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// # }) +/// # } /// ``` /// /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use std::str::FromStr; /// use zenoh::prelude::r#async::*; /// @@ -255,7 +258,7 @@ where /// config.connect.endpoints.extend("tcp/10.10.10.10:7447,tcp/11.11.11.11:7447".split(',').map(|s|s.parse().unwrap())); /// /// let session = zenoh::open(config).res().await.unwrap(); -/// # }) +/// # } /// ``` pub fn open(config: TryIntoConfig) -> OpenBuilder where @@ -269,11 +272,12 @@ where /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct OpenBuilder diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 0883041bb7..9cf3b9c362 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -64,7 +64,8 @@ 
lazy_static::lazy_static!( /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -74,7 +75,7 @@ lazy_static::lazy_static!( /// .res() /// .await /// .unwrap(); -/// # }) +/// # } /// ``` #[zenoh_macros::unstable] pub struct Liveliness<'a> { @@ -91,7 +92,8 @@ impl<'a> Liveliness<'a> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -101,7 +103,7 @@ impl<'a> Liveliness<'a> { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[zenoh_macros::unstable] pub fn declare_token<'b, TryIntoKeyExpr>( @@ -126,7 +128,8 @@ impl<'a> Liveliness<'a> { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -137,7 +140,7 @@ impl<'a> Liveliness<'a> { /// SampleKind::Delete => println!("Lost liveliness: {}", sample.key_expr), /// } /// } - /// # }) + /// # } /// ``` #[zenoh_macros::unstable] pub fn declare_subscriber<'b, TryIntoKeyExpr>( @@ -163,7 +166,8 @@ impl<'a> Liveliness<'a> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -173,7 +177,7 @@ impl<'a> Liveliness<'a> { /// println!(">> Liveliness token {}", sample.key_expr); /// } /// } - /// # }) + /// # } /// ``` #[zenoh_macros::unstable] pub fn get<'b: 'a, TryIntoKeyExpr>( @@ -202,7 +206,8 @@ impl<'a> Liveliness<'a> { /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # 
async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -212,7 +217,7 @@ impl<'a> Liveliness<'a> { /// .res() /// .await /// .unwrap(); -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[zenoh_macros::unstable] @@ -276,7 +281,8 @@ pub(crate) struct LivelinessTokenState { /// /// # Examples /// ```no_run -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -286,7 +292,7 @@ pub(crate) struct LivelinessTokenState { /// .res() /// .await /// .unwrap(); -/// # }) +/// # } /// ``` #[zenoh_macros::unstable] #[derive(Debug)] @@ -300,7 +306,8 @@ pub struct LivelinessToken<'a> { /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -312,7 +319,7 @@ pub struct LivelinessToken<'a> { /// .unwrap(); /// /// liveliness.undeclare().res().await.unwrap(); -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[zenoh_macros::unstable] @@ -352,7 +359,8 @@ impl<'a> LivelinessToken<'a> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -364,7 +372,7 @@ impl<'a> LivelinessToken<'a> { /// .unwrap(); /// /// liveliness.undeclare().res().await.unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn undeclare(self) -> impl Resolve> + 'a { @@ -392,7 +400,8 @@ impl Drop for LivelinessToken<'_> { /// /// # Examples /// ``` -/// 
# async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -403,7 +412,7 @@ impl Drop for LivelinessToken<'_> { /// .res() /// .await /// .unwrap(); -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[zenoh_macros::unstable] @@ -420,7 +429,8 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -430,7 +440,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[inline] #[zenoh_macros::unstable] @@ -460,7 +470,8 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -471,7 +482,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[inline] #[zenoh_macros::unstable] @@ -489,7 +500,8 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -502,7 +514,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {} {}", sample.key_expr, sample.value); 
/// } - /// # }) + /// # } /// ``` #[inline] #[zenoh_macros::unstable] @@ -580,7 +592,8 @@ where /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// # use std::convert::TryFrom; /// use zenoh::prelude::r#async::*; /// use zenoh::query::*; @@ -598,7 +611,7 @@ where /// Err(err) => println!("Received (ERROR: '{}')", String::try_from(&err).unwrap()), /// } /// } -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] @@ -614,7 +627,8 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -625,7 +639,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn callback(self, callback: Callback) -> LivelinessGetBuilder<'a, 'b, Callback> @@ -653,7 +667,8 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -665,7 +680,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn callback_mut( @@ -682,7 +697,8 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -696,7 +712,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, 
DefaultHandler> { /// while let Ok(reply) = replies.recv_async().await { /// println!("Received {:?}", reply.sample); /// } - /// # }) + /// # } /// ``` #[inline] pub fn with(self, handler: Handler) -> LivelinessGetBuilder<'a, 'b, Handler> diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index a655d2f0a3..020d796a1a 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -43,12 +43,12 @@ use crate::{ }, runtime::Runtime, }; -use async_std::task::JoinHandle; use std::{ any::Any, collections::{HashMap, HashSet}, sync::Arc, }; +use tokio::task::JoinHandle; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, @@ -128,8 +128,8 @@ impl HatTables { fn schedule_compute_trees(&mut self, tables_ref: Arc) { log::trace!("Schedule computations"); if self.peers_trees_task.is_none() { - let task = Some(async_std::task::spawn(async move { - async_std::task::sleep(std::time::Duration::from_millis( + let task = Some(zenoh_runtime::ZRuntime::Net.spawn(async move { + tokio::time::sleep(std::time::Duration::from_millis( *TREES_COMPUTATION_DELAY_MS, )) .await; diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs index ecd535eb86..182a721a27 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/network.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/network.rs @@ -15,7 +15,6 @@ use crate::net::codec::Zenoh080Routing; use crate::net::protocol::linkstate::{LinkState, LinkStateList}; use crate::net::routing::dispatcher::tables::NodeId; use crate::net::runtime::Runtime; -use async_std::task; use petgraph::graph::NodeIndex; use petgraph::visit::{VisitMap, Visitable}; use std::convert::TryInto; @@ -487,7 +486,8 @@ impl Network { if !self.autoconnect.is_empty() { // Connect discovered peers - if 
task::block_on(self.runtime.manager().get_transport_unicast(&zid)) + if zenoh_runtime::ZRuntime::Net + .block_in_place(self.runtime.manager().get_transport_unicast(&zid)) .is_none() && self.autoconnect.matches(whatami) { @@ -495,7 +495,7 @@ impl Network { let runtime = self.runtime.clone(); self.runtime.spawn(async move { // random backoff - async_std::task::sleep(std::time::Duration::from_millis( + tokio::time::sleep(std::time::Duration::from_millis( rand::random::() % 100, )) .await; @@ -606,7 +606,8 @@ impl Network { for (_, idx, _) in &link_states { let node = &self.graph[*idx]; if let Some(whatami) = node.whatami { - if task::block_on(self.runtime.manager().get_transport_unicast(&node.zid)) + if zenoh_runtime::ZRuntime::Net + .block_in_place(self.runtime.manager().get_transport_unicast(&node.zid)) .is_none() && self.autoconnect.matches(whatami) { @@ -616,7 +617,7 @@ impl Network { let locators = locators.clone(); self.runtime.spawn(async move { // random backoff - async_std::task::sleep(std::time::Duration::from_millis( + tokio::time::sleep(std::time::Duration::from_millis( rand::random::() % 100, )) .await; diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs index c413107f85..247412bfdf 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -14,7 +14,6 @@ use crate::net::codec::Zenoh080Routing; use crate::net::protocol::linkstate::{LinkState, LinkStateList}; use crate::net::runtime::Runtime; -use async_std::task; use petgraph::graph::NodeIndex; use std::convert::TryInto; use vec_map::VecMap; @@ -407,7 +406,8 @@ impl Network { if !self.autoconnect.is_empty() { // Connect discovered peers - if task::block_on(self.runtime.manager().get_transport_unicast(&zid)) + if zenoh_runtime::ZRuntime::Net + .block_in_place(self.runtime.manager().get_transport_unicast(&zid)) .is_none() && self.autoconnect.matches(whatami) { @@ -415,7 +415,7 @@ impl Network { let 
runtime = self.runtime.clone(); self.runtime.spawn(async move { // random backoff - async_std::task::sleep(std::time::Duration::from_millis( + tokio::time::sleep(std::time::Duration::from_millis( rand::random::() % 100, )) .await; diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 24c837e8f5..5497afc9b8 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -47,13 +47,13 @@ use crate::{ }, runtime::Runtime, }; -use async_std::task::JoinHandle; use std::{ any::Any, collections::{hash_map::DefaultHasher, HashMap, HashSet}, hash::Hasher, sync::Arc, }; +use tokio::task::JoinHandle; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, @@ -243,8 +243,8 @@ impl HatTables { if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) { - let task = Some(async_std::task::spawn(async move { - async_std::task::sleep(std::time::Duration::from_millis( + let task = Some(zenoh_runtime::ZRuntime::Net.spawn(async move { + tokio::time::sleep(std::time::Duration::from_millis( *TREES_COMPUTATION_DELAY_MS, )) .await; diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs index aa1209b7ed..7ff42f1dc3 100644 --- a/zenoh/src/net/routing/hat/router/network.rs +++ b/zenoh/src/net/routing/hat/router/network.rs @@ -15,7 +15,6 @@ use crate::net::codec::Zenoh080Routing; use crate::net::protocol::linkstate::{LinkState, LinkStateList}; use crate::net::routing::dispatcher::tables::NodeId; use crate::net::runtime::Runtime; -use async_std::task; use petgraph::graph::NodeIndex; use petgraph::visit::{IntoNodeReferences, VisitMap, Visitable}; use std::convert::TryInto; @@ -492,7 +491,8 @@ impl Network { if !self.autoconnect.is_empty() { // Connect discovered peers - if 
task::block_on(self.runtime.manager().get_transport_unicast(&zid)) + if zenoh_runtime::ZRuntime::Net + .block_in_place(self.runtime.manager().get_transport_unicast(&zid)) .is_none() && self.autoconnect.matches(whatami) { @@ -500,7 +500,7 @@ impl Network { let runtime = self.runtime.clone(); self.runtime.spawn(async move { // random backoff - async_std::task::sleep(std::time::Duration::from_millis( + tokio::time::sleep(std::time::Duration::from_millis( rand::random::() % 100, )) .await; @@ -611,7 +611,8 @@ impl Network { for (_, idx, _) in &link_states { let node = &self.graph[*idx]; if let Some(whatami) = node.whatami { - if task::block_on(self.runtime.manager().get_transport_unicast(&node.zid)) + if zenoh_runtime::ZRuntime::Net + .block_in_place(self.runtime.manager().get_transport_unicast(&node.zid)) .is_none() && self.autoconnect.matches(whatami) { @@ -621,7 +622,7 @@ impl Network { let locators = locators.clone(); self.runtime.spawn(async move { // random backoff - async_std::task::sleep(std::time::Duration::from_millis( + tokio::time::sleep(std::time::Duration::from_millis( rand::random::() % 100, )) .await; diff --git a/zenoh/src/net/routing/interceptor/downsampling.rs b/zenoh/src/net/routing/interceptor/downsampling.rs index 467ccd6e1e..8cb3b18785 100644 --- a/zenoh/src/net/routing/interceptor/downsampling.rs +++ b/zenoh/src/net/routing/interceptor/downsampling.rs @@ -110,8 +110,8 @@ impl InterceptorFactoryTrait for DownsamplingInterceptorFactory { } struct Timestate { - pub threshold: std::time::Duration, - pub latest_message_timestamp: std::time::Instant, + pub threshold: tokio::time::Duration, + pub latest_message_timestamp: tokio::time::Instant, } pub(crate) struct DownsamplingInterceptor { @@ -140,7 +140,7 @@ impl InterceptorTrait for DownsamplingInterceptor { if let Some(id) = id { let mut ke_state = zlock!(self.ke_state); if let Some(state) = ke_state.get_mut(id) { - let timestamp = std::time::Instant::now(); + let timestamp = 
tokio::time::Instant::now(); if timestamp - state.latest_message_timestamp >= state.threshold { state.latest_message_timestamp = timestamp; @@ -169,11 +169,11 @@ impl DownsamplingInterceptor { let mut ke_id = KeBoxTree::default(); let mut ke_state = HashMap::default(); for (id, rule) in rules.into_iter().enumerate() { - let mut threshold = std::time::Duration::MAX; - let mut latest_message_timestamp = std::time::Instant::now(); + let mut threshold = tokio::time::Duration::MAX; + let mut latest_message_timestamp = tokio::time::Instant::now(); if rule.freq != 0.0 { threshold = - std::time::Duration::from_nanos((1. / rule.freq * NANOS_PER_SEC) as u64); + tokio::time::Duration::from_nanos((1. / rule.freq * NANOS_PER_SEC) as u64); latest_message_timestamp -= threshold; } ke_id.insert(&rule.key_expr, id); diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 1283ee0fce..16e44f072c 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -19,7 +19,6 @@ use crate::prelude::sync::{Sample, SyncResolve}; use crate::queryable::Query; use crate::queryable::QueryInner; use crate::value::Value; -use async_std::task; use log::{error, trace}; use serde_json::json; use std::collections::HashMap; @@ -200,7 +199,7 @@ impl AdminSpace { .set_plugin_validator(Arc::downgrade(&admin)); let cfg_rx = admin.context.runtime.state.config.subscribe(); - task::spawn({ + tokio::task::spawn({ let admin = admin.clone(); async move { while let Ok(change) = cfg_rx.recv_async().await { @@ -533,7 +532,8 @@ fn router_data(context: &AdminContext, query: Query) { } json }; - let transports: Vec = task::block_on(transport_mgr.get_transports_unicast()) + let transports: Vec = zenoh_runtime::ZRuntime::Net + .block_in_place(transport_mgr.get_transports_unicast()) .iter() .map(transport_to_json) .collect(); diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 7061b38622..282c45f66c 100644 --- 
a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -26,13 +26,12 @@ use super::routing::router::Router; use crate::config::{unwrap_or_default, Config, ModeDependent, Notifier}; use crate::GIT_VERSION; pub use adminspace::AdminSpace; -use async_std::task::JoinHandle; use futures::stream::StreamExt; use futures::Future; use std::any::Any; use std::sync::Arc; -use stop_token::future::FutureExt; -use stop_token::{StopSource, TimedOutError}; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use uhlc::{HLCBuilder, HLC}; use zenoh_link::{EndPoint, Link}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; @@ -55,7 +54,7 @@ struct RuntimeState { transport_handlers: std::sync::RwLock>>, locators: std::sync::RwLock>, hlc: Option>, - stop_source: std::sync::RwLock>, + token: CancellationToken, } #[derive(Clone)] @@ -121,7 +120,7 @@ impl Runtime { transport_handlers: std::sync::RwLock::new(vec![]), locators: std::sync::RwLock::new(vec![]), hlc, - stop_source: std::sync::RwLock::new(Some(StopSource::new())), + token: CancellationToken::new(), }), }; *handler.runtime.write().unwrap() = Some(runtime.clone()); @@ -156,7 +155,8 @@ impl Runtime { pub async fn close(&self) -> ZResult<()> { log::trace!("Runtime::close())"); - drop(self.state.stop_source.write().unwrap().take()); + // TODO: Check this whether is able to terminate all spawned task by Runtime::spawn + self.state.token.cancel(); self.manager().close().await; Ok(()) } @@ -169,17 +169,18 @@ impl Runtime { self.state.locators.read().unwrap().clone() } - pub(crate) fn spawn(&self, future: F) -> Option>> + pub(crate) fn spawn(&self, future: F) -> JoinHandle<()> where F: Future + Send + 'static, T: Send + 'static, { - self.state - .stop_source - .read() - .unwrap() - .as_ref() - .map(|source| async_std::task::spawn(future.timeout_at(source.token()))) + let token = self.state.token.clone(); + zenoh_runtime::ZRuntime::Net.spawn(async move { + tokio::select! 
{ + _ = token.cancelled() => {} + _ = future => {} + } + }) } pub(crate) fn router(&self) -> Arc { diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs index a1a2c8db48..298548f3b7 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -12,12 +12,11 @@ // ZettaScale Zenoh Team, // use super::{Runtime, RuntimeSession}; -use async_std::net::UdpSocket; -use async_std::prelude::FutureExt; use futures::prelude::*; use socket2::{Domain, Socket, Type}; use std::net::{IpAddr, Ipv6Addr, SocketAddr}; use std::time::Duration; +use tokio::net::UdpSocket; use zenoh_buffers::reader::DidntRead; use zenoh_buffers::{reader::HasReader, writer::HasWriter}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; @@ -90,18 +89,22 @@ impl Runtime { } _ => { for locator in &peers { - match self - .manager() - .open_transport_unicast(locator.clone()) - .timeout(CONNECTION_TIMEOUT) - .await + match tokio::time::timeout( + CONNECTION_TIMEOUT, + self.manager().open_transport_unicast(locator.clone()), + ) + .await { Ok(Ok(_)) => return Ok(()), Ok(Err(e)) => log::warn!("Unable to connect to {}! {}", locator, e), Err(e) => log::warn!("Unable to connect to {}! {}", locator, e), } } - let e = zerror!("Unable to connect to any of {:?}! ", peers); + let e = zerror!( + "{:?} Unable to connect to any of {:?}! ", + self.manager().get_locators(), + peers + ); log::error!("{}", &e); Err(e.into()) } @@ -150,7 +153,7 @@ impl Runtime { if scouting { self.start_scout(listen, autoconnect, addr, ifaces).await?; } - async_std::task::sleep(delay).await; + tokio::time::sleep(delay).await; Ok(()) } @@ -218,11 +221,10 @@ impl Runtime { match (listen, autoconnect.is_empty()) { (true, false) => { self.spawn(async move { - async_std::prelude::FutureExt::race( - this.responder(&mcast_socket, &sockets), - this.connect_all(&sockets, autoconnect, &addr), - ) - .await; + tokio::select! 
{ + _ = this.responder(&mcast_socket, &sockets) => {}, + _ = this.connect_all(&sockets, autoconnect, &addr) => {}, + } }); } (true, true) => { @@ -417,7 +419,15 @@ impl Runtime { } } log::info!("zenohd listening scout messages on {}", sockaddr); - Ok(std::net::UdpSocket::from(socket).into()) + + // Must set to nonblocking according to the doc of tokio + // https://docs.rs/tokio/latest/tokio/net/struct.UdpSocket.html#notes + socket.set_nonblocking(true)?; + + // UdpSocket::from_std requires a runtime even though it's a sync function + let udp_socket = zenoh_runtime::ZRuntime::Net + .block_in_place(async { UdpSocket::from_std(socket.into()) })?; + Ok(udp_socket) } pub fn bind_ucast_port(addr: IpAddr) -> ZResult { @@ -443,7 +453,15 @@ impl Runtime { bail!(err => "Unable to bind udp port {}:0", addr); } } - Ok(std::net::UdpSocket::from(socket).into()) + + // Must set to nonblocking according to the doc of tokio + // https://docs.rs/tokio/latest/tokio/net/struct.UdpSocket.html#notes + socket.set_nonblocking(true)?; + + // UdpSocket::from_std requires a runtime even though it's a sync function + let udp_socket = zenoh_runtime::ZRuntime::Net + .block_in_place(async { UdpSocket::from_std(socket.into()) })?; + Ok(udp_socket) } async fn spawn_peer_connector(&self, peer: EndPoint) -> ZResult<()> { @@ -464,11 +482,11 @@ impl Runtime { loop { log::trace!("Trying to connect to configured peer {}", peer); let endpoint = peer.clone(); - match self - .manager() - .open_transport_unicast(endpoint) - .timeout(CONNECTION_TIMEOUT) - .await + match tokio::time::timeout( + CONNECTION_TIMEOUT, + self.manager().open_transport_unicast(endpoint), + ) + .await { Ok(Ok(transport)) => { log::debug!("Successfully connected to configured peer {}", peer); @@ -499,7 +517,7 @@ impl Runtime { ); } } - async_std::task::sleep(delay).await; + tokio::time::sleep(delay).await; delay *= CONNECTION_RETRY_PERIOD_INCREASE_FACTOR; if delay > CONNECTION_RETRY_MAX_PERIOD { delay = CONNECTION_RETRY_MAX_PERIOD; @@ 
-556,7 +574,7 @@ impl Runtime { ); } } - async_std::task::sleep(delay).await; + tokio::time::sleep(delay).await; if delay * SCOUT_PERIOD_INCREASE_FACTOR <= SCOUT_MAX_PERIOD { delay *= SCOUT_PERIOD_INCREASE_FACTOR; } @@ -597,7 +615,10 @@ impl Runtime { } .boxed() })); - async_std::prelude::FutureExt::race(send, recvs).await; + tokio::select! { + _ = send => {}, + _ = recvs => {}, + } } #[must_use] @@ -617,10 +638,11 @@ impl Runtime { let endpoint = locator.to_owned().into(); let manager = self.manager(); if is_multicast { - match manager - .open_transport_multicast(endpoint) - .timeout(CONNECTION_TIMEOUT) - .await + match tokio::time::timeout( + CONNECTION_TIMEOUT, + manager.open_transport_multicast(endpoint), + ) + .await { Ok(Ok(transport)) => { log::debug!( @@ -633,10 +655,11 @@ impl Runtime { Err(e) => log::trace!("{} {} on {}: {}", ERR, zid, locator, e), } } else { - match manager - .open_transport_unicast(endpoint) - .timeout(CONNECTION_TIMEOUT) - .await + match tokio::time::timeout( + CONNECTION_TIMEOUT, + manager.open_transport_unicast(endpoint), + ) + .await { Ok(Ok(transport)) => { log::debug!( @@ -707,10 +730,13 @@ impl Runtime { Ok(()) }; let timeout = async { - async_std::task::sleep(timeout).await; + tokio::time::sleep(timeout).await; bail!("timeout") }; - async_std::prelude::FutureExt::race(scout, timeout).await + tokio::select! 
{ + res = scout => { res }, + res = timeout => { res } + } } async fn connect_all( @@ -819,7 +845,7 @@ impl Runtime { session.runtime.spawn(async move { let mut delay = CONNECTION_RETRY_INITIAL_PERIOD; while runtime.start_client().await.is_err() { - async_std::task::sleep(delay).await; + tokio::time::sleep(delay).await; delay *= CONNECTION_RETRY_PERIOD_INCREASE_FACTOR; if delay > CONNECTION_RETRY_MAX_PERIOD { delay = CONNECTION_RETRY_MAX_PERIOD; diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index d54a77b793..ef33115a6b 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -43,7 +43,8 @@ pub use zenoh_protocol::core::CongestionControl; /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; /// @@ -53,7 +54,7 @@ pub use zenoh_protocol::core::CongestionControl; /// .res() /// .await /// .unwrap(); -/// # }) +/// # } /// ``` pub type DeleteBuilder<'a, 'b> = PutBuilder<'a, 'b>; @@ -61,7 +62,8 @@ pub type DeleteBuilder<'a, 'b> = PutBuilder<'a, 'b>; /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; /// @@ -73,7 +75,7 @@ pub type DeleteBuilder<'a, 'b> = PutBuilder<'a, 'b>; /// .res() /// .await /// .unwrap(); -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug, Clone)] @@ -213,20 +215,22 @@ impl std::fmt::Debug for PublisherRef<'_> { /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); /// let publisher = 
session.declare_publisher("key/expression").res().await.unwrap(); /// publisher.put("value").res().await.unwrap(); -/// # }) +/// # } /// ``` /// /// /// `Publisher` implements the `Sink` trait which is useful to forward /// streams to zenoh. /// ```no_run -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use futures::StreamExt; /// use zenoh::prelude::r#async::*; /// @@ -234,7 +238,7 @@ impl std::fmt::Debug for PublisherRef<'_> { /// let mut subscriber = session.declare_subscriber("key/expression").res().await.unwrap(); /// let publisher = session.declare_publisher("another/key/expression").res().await.unwrap(); /// subscriber.stream().map(Ok).forward(publisher).await.unwrap(); -/// # }) +/// # } /// ``` #[derive(Debug, Clone)] pub struct Publisher<'a> { @@ -285,14 +289,15 @@ impl<'a> Publisher<'a> { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); /// let matching_listener = publisher.matching_listener().res().await.unwrap(); /// - /// async_std::task::spawn(async move { + /// tokio::task::spawn(async move { /// while let Ok(matching_status) = matching_listener.recv_async().await { /// if matching_status.matching_subscribers() { /// println!("Publisher has matching subscribers."); @@ -301,7 +306,7 @@ impl<'a> Publisher<'a> { /// } /// } /// }).await; - /// # }) + /// # } /// ``` #[zenoh_macros::unstable] pub fn into_arc(self) -> std::sync::Arc { @@ -322,13 +327,14 @@ impl<'a> Publisher<'a> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); /// let 
publisher = session.declare_publisher("key/expression").res().await.unwrap(); /// publisher.put("value").res().await.unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn put(&self, value: IntoValue) -> Publication @@ -342,13 +348,14 @@ impl<'a> Publisher<'a> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); /// publisher.delete().res().await.unwrap(); - /// # }) + /// # } /// ``` pub fn delete(&self) -> Publication { self._write(SampleKind::Delete, Value::empty()) @@ -361,7 +368,8 @@ impl<'a> Publisher<'a> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); @@ -372,7 +380,7 @@ impl<'a> Publisher<'a> { /// .await /// .unwrap() /// .matching_subscribers(); - /// # }) + /// # } /// ``` #[zenoh_macros::unstable] pub fn matching_status(&self) -> impl Resolve> + '_ { @@ -389,7 +397,8 @@ impl<'a> Publisher<'a> { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -402,7 +411,7 @@ impl<'a> Publisher<'a> { /// println!("Publisher has NO MORE matching subscribers."); /// } /// } - /// # }) + /// # } /// ``` #[zenoh_macros::unstable] pub fn matching_listener(&self) -> MatchingListenerBuilder<'_, DefaultHandler> { @@ -416,13 +425,14 @@ impl<'a> Publisher<'a> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = 
zenoh::open(config::peer()).res().await.unwrap(); /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); /// publisher.undeclare().res().await.unwrap(); - /// # }) + /// # } /// ``` pub fn undeclare(self) -> impl Resolve> + 'a { Undeclarable::undeclare_inner(self, ()) @@ -449,14 +459,15 @@ impl<'a> HasWriteWithSampleKind for Publisher<'a> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::HasWriteWithSampleKind; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); /// publisher.write(SampleKind::Put, "value").res().await.unwrap(); - /// # }) + /// # } /// ``` fn write(&self, kind: SampleKind, value: IntoValue) -> Self::WriteOutput<'_> where @@ -476,14 +487,15 @@ impl<'a> HasWriteWithSampleKind for Publisher<'a> { /// /// # Examples /// ```no_run -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); /// let matching_listener = publisher.matching_listener().res().await.unwrap(); /// -/// async_std::task::spawn(async move { +/// tokio::task::spawn(async move { /// while let Ok(matching_status) = matching_listener.recv_async().await { /// if matching_status.matching_subscribers() { /// println!("Publisher has matching subscribers."); @@ -492,20 +504,21 @@ impl<'a> HasWriteWithSampleKind for Publisher<'a> { /// } /// } /// }).await; -/// # }) +/// # } /// ``` #[zenoh_macros::unstable] pub trait PublisherDeclarations { /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use 
zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); /// let matching_listener = publisher.matching_listener().res().await.unwrap(); /// - /// async_std::task::spawn(async move { + /// tokio::task::spawn(async move { /// while let Ok(matching_status) = matching_listener.recv_async().await { /// if matching_status.matching_subscribers() { /// println!("Publisher has matching subscribers."); @@ -514,7 +527,7 @@ pub trait PublisherDeclarations { /// } /// } /// }).await; - /// # }) + /// # } /// ``` #[zenoh_macros::unstable] fn matching_listener(&self) -> MatchingListenerBuilder<'static, DefaultHandler>; @@ -524,14 +537,15 @@ pub trait PublisherDeclarations { impl PublisherDeclarations for std::sync::Arc> { /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); /// let matching_listener = publisher.matching_listener().res().await.unwrap(); /// - /// async_std::task::spawn(async move { + /// tokio::task::spawn(async move { /// while let Ok(matching_status) = matching_listener.recv_async().await { /// if matching_status.matching_subscribers() { /// println!("Publisher has matching subscribers."); @@ -540,7 +554,7 @@ impl PublisherDeclarations for std::sync::Arc> { /// } /// } /// }).await; - /// # }) + /// # } /// ``` #[zenoh_macros::unstable] fn matching_listener(&self) -> MatchingListenerBuilder<'static, DefaultHandler> { @@ -561,13 +575,14 @@ impl<'a> Undeclarable<(), PublisherUndeclaration<'a>> for Publisher<'a> { /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use 
zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); /// publisher.undeclare().res().await.unwrap(); -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct PublisherUndeclaration<'a> { @@ -684,7 +699,8 @@ where /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; /// @@ -695,7 +711,7 @@ where /// .res() /// .await /// .unwrap(); -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] @@ -985,13 +1001,14 @@ impl TryFrom for Priority { /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); /// let matching_status = publisher.matching_status().res().await.unwrap(); -/// # }) +/// # } /// ``` #[zenoh_macros::unstable] #[derive(Copy, Clone, Debug)] @@ -1005,7 +1022,8 @@ impl MatchingStatus { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); @@ -1016,7 +1034,7 @@ impl MatchingStatus { /// .await /// .unwrap() /// .matching_subscribers(); - /// # }) + /// # } /// ``` pub fn matching_subscribers(&self) -> bool { self.matching @@ -1037,7 +1055,8 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// /// # Examples /// ``` - 
/// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -1054,7 +1073,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[inline] #[zenoh_macros::unstable] @@ -1076,7 +1095,8 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let mut n = 0; @@ -1088,7 +1108,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[inline] #[zenoh_macros::unstable] @@ -1106,7 +1126,8 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -1124,7 +1145,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// println!("Publisher has NO MORE matching subscribers."); /// } /// } - /// # }) + /// # } /// ``` #[inline] #[zenoh_macros::unstable] @@ -1232,7 +1253,8 @@ impl<'a> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListene /// /// # Examples /// ```no_run -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -1245,7 +1267,7 @@ impl<'a> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListene /// println!("Publisher has NO MORE matching subscribers."); /// } /// } -/// # }) +/// # } /// ``` #[zenoh_macros::unstable] pub struct MatchingListener<'a, Receiver> { @@ -1262,14 +1284,15 @@ impl<'a, Receiver> 
MatchingListener<'a, Receiver> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); /// let matching_listener = publisher.matching_listener().res().await.unwrap(); /// matching_listener.undeclare().res().await.unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn undeclare(self) -> MatchingListenerUndeclaration<'a> { diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index c4f3fb35e9..f75df8c50e 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -99,7 +99,8 @@ pub(crate) struct QueryState { /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::query::*; /// @@ -114,7 +115,7 @@ pub(crate) struct QueryState { /// while let Ok(reply) = replies.recv_async().await { /// println!("Received {:?}", reply.sample) /// } -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] @@ -137,7 +138,8 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -147,7 +149,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn callback(self, callback: Callback) -> GetBuilder<'a, 'b, Callback> @@ -189,7 +191,8 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use 
zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -200,7 +203,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn callback_mut( @@ -217,7 +220,8 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -230,7 +234,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// while let Ok(reply) = replies.recv_async().await { /// println!("Received {:?}", reply.sample); /// } - /// # }) + /// # } /// ``` #[inline] pub fn with(self, handler: Handler) -> GetBuilder<'a, 'b, Handler> diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 5114ef6570..751e454610 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -325,7 +325,8 @@ impl fmt::Debug for QueryableState { /// /// # Examples /// ```no_run -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use futures::prelude::*; /// use zenoh::prelude::r#async::*; /// @@ -338,7 +339,7 @@ impl fmt::Debug for QueryableState { /// .await /// .unwrap(); /// } -/// # }) +/// # } /// ``` #[derive(Debug)] pub(crate) struct CallbackQueryable<'a> { @@ -357,13 +358,14 @@ impl<'a> Undeclarable<(), QueryableUndeclaration<'a>> for CallbackQueryable<'a> /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let queryable = session.declare_queryable("key/expression").res().await.unwrap(); /// queryable.undeclare().res().await.unwrap(); -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from 
either `SyncResolve` or `AsyncResolve`"] pub struct QueryableUndeclaration<'a> { @@ -403,13 +405,14 @@ impl Drop for CallbackQueryable<'_> { /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::queryable; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let queryable = session.declare_queryable("key/expression").res().await.unwrap(); -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] @@ -426,7 +429,8 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -436,7 +440,7 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn callback(self, callback: Callback) -> QueryableBuilder<'a, 'b, Callback> @@ -466,7 +470,8 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -477,7 +482,7 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn callback_mut( @@ -494,7 +499,8 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -507,7 +513,7 @@ impl<'a, 'b> 
QueryableBuilder<'a, 'b, DefaultHandler> { /// while let Ok(query) = queryable.recv_async().await { /// println!(">> Handling query '{}'", query.selector()); /// } - /// # }) + /// # } /// ``` #[inline] pub fn with(self, handler: Handler) -> QueryableBuilder<'a, 'b, Handler> @@ -559,7 +565,8 @@ impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> { /// /// # Examples /// ```no_run -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -576,7 +583,7 @@ impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> { /// .await /// .unwrap(); /// } -/// # }) +/// # } /// ``` #[non_exhaustive] #[derive(Debug)] diff --git a/zenoh/src/scouting.rs b/zenoh/src/scouting.rs index ea09823ea1..ab5866e388 100644 --- a/zenoh/src/scouting.rs +++ b/zenoh/src/scouting.rs @@ -14,9 +14,9 @@ use crate::handlers::{locked, Callback, DefaultHandler}; use crate::net::runtime::{orchestrator::Loop, Runtime}; -use async_std::net::UdpSocket; use futures::StreamExt; use std::{fmt, future::Ready, net::SocketAddr, ops::Deref}; +use tokio::net::UdpSocket; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::WhatAmIMatcher; use zenoh_result::ZResult; @@ -31,7 +31,8 @@ pub use zenoh_protocol::scouting::Hello; /// /// # Examples /// ```no_run -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::scouting::WhatAmI; /// @@ -42,7 +43,7 @@ pub use zenoh_protocol::scouting::Hello; /// while let Ok(hello) = receiver.recv_async().await { /// println!("{}", hello); /// } -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] @@ -57,7 +58,8 @@ impl ScoutBuilder { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # 
#[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::scouting::WhatAmI; /// @@ -66,7 +68,7 @@ impl ScoutBuilder { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn callback(self, callback: Callback) -> ScoutBuilder @@ -92,7 +94,8 @@ impl ScoutBuilder { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::scouting::WhatAmI; /// @@ -102,7 +105,7 @@ impl ScoutBuilder { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn callback_mut( @@ -119,7 +122,8 @@ impl ScoutBuilder { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::scouting::WhatAmI; /// @@ -131,7 +135,7 @@ impl ScoutBuilder { /// while let Ok(hello) = receiver.recv_async().await { /// println!("{}", hello); /// } - /// # }) + /// # } /// ``` #[inline] pub fn with(self, handler: Handler) -> ScoutBuilder @@ -186,7 +190,8 @@ where /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::scouting::WhatAmI; /// @@ -195,7 +200,7 @@ where /// .res() /// .await /// .unwrap(); -/// # }) +/// # } /// ``` pub(crate) struct ScoutInner { #[allow(dead_code)] @@ -207,7 +212,8 @@ impl ScoutInner { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::scouting::WhatAmI; /// @@ -217,7 +223,7 @@ impl ScoutInner { /// .await /// .unwrap(); /// scout.stop(); - /// # }) + /// # } /// ``` pub fn stop(self) { // This drops the inner `stop_sender` and hence stops the scouting receiver @@ -235,7 +241,8 @@ impl fmt::Debug for ScoutInner { /// /// # Examples /// ```no_run -/// # 
async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::scouting::WhatAmI; /// @@ -247,7 +254,7 @@ impl fmt::Debug for ScoutInner { /// while let Ok(hello) = receiver.recv_async().await { /// println!("{}", hello); /// } -/// # }) +/// # } /// ``` #[non_exhaustive] #[derive(Debug)] @@ -269,7 +276,8 @@ impl Scout { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::scouting::WhatAmI; /// @@ -280,7 +288,7 @@ impl Scout { /// .unwrap(); /// let _router = scout.recv_async().await; /// scout.stop(); - /// # }) + /// # } /// ``` pub fn stop(self) { self.scout.stop() @@ -307,7 +315,7 @@ fn scout( .filter_map(|iface| Runtime::bind_ucast_port(iface).ok()) .collect(); if !sockets.is_empty() { - async_std::task::spawn(async move { + zenoh_runtime::ZRuntime::Net.spawn(async move { let mut stop_receiver = stop_receiver.stream(); let scout = Runtime::scout(&sockets, what, &addr, move |hello| { let callback = callback.clone(); @@ -320,7 +328,10 @@ fn scout( stop_receiver.next().await; log::trace!("stop scout({}, {})", what, &config); }; - async_std::prelude::FutureExt::race(scout, stop).await; + tokio::select! 
{ + _ = scout => {}, + _ = stop => {}, + } }); } } diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 7900a3add8..7290d0aeac 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -40,7 +40,6 @@ use crate::Sample; use crate::SampleKind; use crate::Selector; use crate::Value; -use async_std::task; use log::{error, trace, warn}; use std::collections::HashMap; use std::convert::TryFrom; @@ -439,7 +438,8 @@ impl Session { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); @@ -447,12 +447,12 @@ impl Session { /// .res() /// .await /// .unwrap(); - /// async_std::task::spawn(async move { + /// tokio::task::spawn(async move { /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {:?}", sample); /// } /// }).await; - /// # }) + /// # } /// ``` pub fn into_arc(self) -> Arc { Arc::new(self) @@ -472,18 +472,19 @@ impl Session { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::Session; /// /// let session = Session::leak(zenoh::open(config::peer()).res().await.unwrap()); /// let subscriber = session.declare_subscriber("key/expression").res().await.unwrap(); - /// async_std::task::spawn(async move { + /// tokio::task::spawn(async move { /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {:?}", sample); /// } /// }).await; - /// # }) + /// # } /// ``` pub fn leak(s: Self) -> &'static mut Self { Box::leak(Box::new(s)) @@ -506,12 +507,13 @@ impl Session { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// 
session.close().res().await.unwrap(); - /// # }) + /// # } /// ``` pub fn close(self) -> impl Resolve> { ResolveFuture::new(async move { @@ -543,22 +545,24 @@ impl Session { /// # Examples /// ### Read current zenoh configuration /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let peers = session.config().get("connect/endpoints").unwrap(); - /// # }) + /// # } /// ``` /// /// ### Modify current zenoh configuration /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let _ = session.config().insert_json5("connect/endpoints", r#"["tcp/127.0.0.1/7447"]"#); - /// # }) + /// # } /// ``` pub fn config(&self) -> &Notifier { self.runtime.config() @@ -613,12 +617,13 @@ impl Session { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let key_expr = session.declare_keyexpr("key/expression").res().await.unwrap(); - /// # }) + /// # } /// ``` pub fn declare_keyexpr<'a, 'b: 'a, TryIntoKeyExpr>( &'a self, @@ -674,7 +679,8 @@ impl Session { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -684,7 +690,7 @@ impl Session { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn put<'a, 'b: 'a, TryIntoKeyExpr, IntoValue>( @@ -714,12 +720,13 @@ impl Session { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use 
zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session.delete("key/expression").res().await.unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn delete<'a, 'b: 'a, TryIntoKeyExpr>( @@ -749,7 +756,8 @@ impl Session { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -757,7 +765,7 @@ impl Session { /// while let Ok(reply) = replies.recv_async().await { /// println!(">> Received {:?}", reply.sample); /// } - /// # }) + /// # } /// ``` pub fn get<'a, 'b: 'a, IntoSelector>( &'a self, @@ -816,7 +824,8 @@ impl Session { match runtime.start().await { Ok(()) => { // Workaround for the declare_and_shoot problem - task::sleep(Duration::from_millis(*API_OPEN_SESSION_DELAY)).await; + tokio::time::sleep(Duration::from_millis(*API_OPEN_SESSION_DELAY)) + .await; Ok(session) } Err(err) => Err(err), @@ -1489,7 +1498,8 @@ impl Session { for msub in state.matching_listeners.values() { if key_expr.intersects(&msub.key_expr) { // Cannot hold session lock when calling tables (matching_status()) - async_std::task::spawn({ + // TODO: check which ZRuntime should be used + zenoh_runtime::ZRuntime::RX.spawn({ let session = self.clone(); let msub = msub.clone(); async move { @@ -1522,7 +1532,8 @@ impl Session { for msub in state.matching_listeners.values() { if key_expr.intersects(&msub.key_expr) { // Cannot hold session lock when calling tables (matching_status()) - async_std::task::spawn({ + // TODO: check which ZRuntime should be used + zenoh_runtime::ZRuntime::RX.spawn({ let session = self.clone(); let msub = msub.clone(); async move { @@ -1740,11 +1751,12 @@ impl Session { Locality::Any => 2, _ => 1, }; - task::spawn({ + + zenoh_runtime::ZRuntime::Net.spawn({ let state = self.state.clone(); let zid = self.runtime.zid(); async move { - 
task::sleep(timeout).await; + tokio::time::sleep(timeout).await; let mut state = zwrite!(state); if let Some(query) = state.queries.remove(&qid) { std::mem::drop(state); @@ -1931,7 +1943,8 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); @@ -1939,12 +1952,12 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// .res() /// .await /// .unwrap(); - /// async_std::task::spawn(async move { + /// tokio::task::spawn(async move { /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {:?}", sample); /// } /// }).await; - /// # }) + /// # } /// ``` fn declare_subscriber<'b, TryIntoKeyExpr>( &'s self, @@ -1973,7 +1986,8 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); @@ -1981,7 +1995,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// .res() /// .await /// .unwrap(); - /// async_std::task::spawn(async move { + /// tokio::task::spawn(async move { /// while let Ok(query) = queryable.recv_async().await { /// query.reply(Ok(Sample::try_from( /// "key/expression", @@ -1989,7 +2003,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// ).unwrap())).res().await.unwrap(); /// } /// }).await; - /// # }) + /// # } /// ``` fn declare_queryable<'b, TryIntoKeyExpr>( &'s self, @@ -2016,7 +2030,8 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = 
zenoh::open(config::peer()).res().await.unwrap().into_arc(); @@ -2025,7 +2040,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// .await /// .unwrap(); /// publisher.put("value").res().await.unwrap(); - /// # }) + /// # } /// ``` fn declare_publisher<'b, TryIntoKeyExpr>( &'s self, @@ -2048,7 +2063,8 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); @@ -2058,7 +2074,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[zenoh_macros::unstable] fn liveliness(&'s self) -> Liveliness<'static> { @@ -2505,7 +2521,8 @@ impl fmt::Debug for Session { /// /// # Examples /// ```no_run -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); @@ -2513,12 +2530,12 @@ impl fmt::Debug for Session { /// .res() /// .await /// .unwrap(); -/// async_std::task::spawn(async move { +/// tokio::task::spawn(async move { /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {:?}", sample); /// } /// }).await; -/// # }) +/// # } /// ``` pub trait SessionDeclarations<'s, 'a> { /// Create a [`Subscriber`](crate::subscriber::Subscriber) for the given key expression. 
@@ -2529,7 +2546,8 @@ pub trait SessionDeclarations<'s, 'a> { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); @@ -2537,12 +2555,12 @@ pub trait SessionDeclarations<'s, 'a> { /// .res() /// .await /// .unwrap(); - /// async_std::task::spawn(async move { + /// tokio::task::spawn(async move { /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {:?}", sample); /// } /// }).await; - /// # }) + /// # } /// ``` fn declare_subscriber<'b, TryIntoKeyExpr>( &'s self, @@ -2561,7 +2579,8 @@ pub trait SessionDeclarations<'s, 'a> { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); @@ -2569,7 +2588,7 @@ pub trait SessionDeclarations<'s, 'a> { /// .res() /// .await /// .unwrap(); - /// async_std::task::spawn(async move { + /// tokio::task::spawn(async move { /// while let Ok(query) = queryable.recv_async().await { /// query.reply(Ok(Sample::try_from( /// "key/expression", @@ -2577,7 +2596,7 @@ pub trait SessionDeclarations<'s, 'a> { /// ).unwrap())).res().await.unwrap(); /// } /// }).await; - /// # }) + /// # } /// ``` fn declare_queryable<'b, TryIntoKeyExpr>( &'s self, @@ -2595,7 +2614,8 @@ pub trait SessionDeclarations<'s, 'a> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); @@ -2604,7 +2624,7 @@ pub trait SessionDeclarations<'s, 'a> { /// .await /// .unwrap(); /// publisher.put("value").res().await.unwrap(); - /// # }) + /// # } /// ``` fn declare_publisher<'b, TryIntoKeyExpr>( &'s 
self, @@ -2618,7 +2638,8 @@ pub trait SessionDeclarations<'s, 'a> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); @@ -2628,7 +2649,7 @@ pub trait SessionDeclarations<'s, 'a> { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[zenoh_macros::unstable] fn liveliness(&'s self) -> Liveliness<'a>; @@ -2636,12 +2657,13 @@ pub trait SessionDeclarations<'s, 'a> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let info = session.info(); - /// # }) + /// # } /// ``` fn info(&'s self) -> SessionInfo<'a>; } diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index f3fde38d20..dc53120fff 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -59,7 +59,8 @@ impl fmt::Debug for SubscriberState { /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -69,7 +70,7 @@ impl fmt::Debug for SubscriberState { /// .res() /// .await /// .unwrap(); -/// # }) +/// # } /// ``` #[derive(Debug)] pub(crate) struct SubscriberInner<'a> { @@ -92,7 +93,8 @@ pub(crate) struct SubscriberInner<'a> { /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -104,7 +106,7 @@ pub(crate) struct SubscriberInner<'a> { /// .await /// .unwrap(); /// subscriber.pull(); -/// # }) +/// # } /// ``` pub(crate) struct PullSubscriberInner<'a> { inner: SubscriberInner<'a>, @@ 
-115,7 +117,8 @@ impl<'a> PullSubscriberInner<'a> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::subscriber::SubMode; /// @@ -128,7 +131,7 @@ impl<'a> PullSubscriberInner<'a> { /// .await /// .unwrap(); /// subscriber.pull(); - /// # }) + /// # } /// ``` #[inline] pub fn pull(&self) -> impl Resolve> + '_ { @@ -142,7 +145,8 @@ impl<'a> PullSubscriberInner<'a> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -155,7 +159,7 @@ impl<'a> PullSubscriberInner<'a> { /// .await /// .unwrap(); /// subscriber.undeclare().res().await.unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn undeclare(self) -> impl Resolve> + 'a { @@ -171,7 +175,8 @@ impl<'a> SubscriberInner<'a> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -183,7 +188,7 @@ impl<'a> SubscriberInner<'a> { /// .await /// .unwrap(); /// subscriber.undeclare().res().await.unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn undeclare(self) -> SubscriberUndeclaration<'a> { @@ -201,7 +206,8 @@ impl<'a> Undeclarable<(), SubscriberUndeclaration<'a>> for SubscriberInner<'a> { /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -211,7 +217,7 @@ impl<'a> Undeclarable<(), SubscriberUndeclaration<'a>> for SubscriberInner<'a> { /// .await /// .unwrap(); /// subscriber.undeclare().res().await.unwrap(); -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless 
you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct SubscriberUndeclaration<'a> { @@ -285,7 +291,8 @@ impl From for Mode { /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -296,7 +303,7 @@ impl From for Mode { /// .res() /// .await /// .unwrap(); -/// # }) +/// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] @@ -337,7 +344,8 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -347,7 +355,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn callback(self, callback: Callback) -> SubscriberBuilder<'a, 'b, Mode, Callback> @@ -379,7 +387,8 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -390,7 +399,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// .res() /// .await /// .unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn callback_mut( @@ -407,7 +416,8 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// /// # Examples /// ```no_run - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = 
zenoh::open(config::peer()).res().await.unwrap(); @@ -420,7 +430,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {} {}", sample.key_expr, sample.value); /// } - /// # }) + /// # } /// ``` #[inline] pub fn with(self, handler: Handler) -> SubscriberBuilder<'a, 'b, Mode, Handler> @@ -636,7 +646,8 @@ where /// /// # Examples /// ```no_run -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -649,7 +660,7 @@ where /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {} {}", sample.key_expr, sample.value); /// } -/// # }) +/// # } /// ``` #[non_exhaustive] #[derive(Debug)] @@ -672,7 +683,8 @@ pub struct Subscriber<'a, Receiver> { /// /// # Examples /// ``` -/// # async_std::task::block_on(async { +/// # #[tokio::main] +/// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -684,7 +696,7 @@ pub struct Subscriber<'a, Receiver> { /// .await /// .unwrap(); /// subscriber.pull(); -/// # }) +/// # } /// ``` #[non_exhaustive] pub struct PullSubscriber<'a, Receiver> { @@ -710,7 +722,8 @@ impl<'a, Receiver> PullSubscriber<'a, Receiver> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::subscriber::SubMode; /// @@ -723,7 +736,7 @@ impl<'a, Receiver> PullSubscriber<'a, Receiver> { /// .await /// .unwrap(); /// subscriber.pull(); - /// # }) + /// # } /// ``` #[inline] pub fn pull(&self) -> impl Resolve> + '_ { @@ -737,7 +750,8 @@ impl<'a, Receiver> PullSubscriber<'a, Receiver> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use 
zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -747,7 +761,7 @@ impl<'a, Receiver> PullSubscriber<'a, Receiver> { /// .await /// .unwrap(); /// subscriber.undeclare().res().await.unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn undeclare(self) -> impl Resolve> + 'a { @@ -768,7 +782,8 @@ impl<'a, Receiver> Subscriber<'a, Receiver> { /// /// # Examples /// ``` - /// # async_std::task::block_on(async { + /// # #[tokio::main] + /// # async fn main() { /// use zenoh::prelude::r#async::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); @@ -777,7 +792,7 @@ impl<'a, Receiver> Subscriber<'a, Receiver> { /// .await /// .unwrap(); /// subscriber.undeclare().res().await.unwrap(); - /// # }) + /// # } /// ``` #[inline] pub fn undeclare(self) -> SubscriberUndeclaration<'a> { diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 0ea775784a..6b2790e151 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -11,21 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::prelude::FutureExt; -use async_std::task; use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::query::Reply; -use zenoh_core::zasync_executor_init; +use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(10); -macro_rules! 
ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; -} - async fn open_session(listen: &[&str], connect: &[&str]) -> Session { let mut config = config::peer(); config.listen.endpoints = listen @@ -46,77 +38,73 @@ async fn close_session(session: Session) { ztimeout!(session.close().res_async()).unwrap(); } -#[test] -fn zenoh_events() { - task::block_on(async { - zasync_executor_init!(); - - let session = open_session(&["tcp/127.0.0.1:18447"], &[]).await; - let zid = session.zid(); - let sub1 = session - .declare_subscriber(format!("@/session/{zid}/transport/unicast/*")) - .res() - .await - .unwrap(); - let sub2 = session - .declare_subscriber(format!("@/session/{zid}/transport/unicast/*/link/*")) - .res() - .await - .unwrap(); +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn zenoh_events() { + let session = open_session(&["tcp/127.0.0.1:18447"], &[]).await; + let zid = session.zid(); + let sub1 = session + .declare_subscriber(format!("@/session/{zid}/transport/unicast/*")) + .res() + .await + .unwrap(); + let sub2 = session + .declare_subscriber(format!("@/session/{zid}/transport/unicast/*/link/*")) + .res() + .await + .unwrap(); - let session2 = open_session(&["tcp/127.0.0.1:18448"], &["tcp/127.0.0.1:18447"]).await; - let zid2 = session2.zid(); + let session2 = open_session(&["tcp/127.0.0.1:18448"], &["tcp/127.0.0.1:18447"]).await; + let zid2 = session2.zid(); - let sample = ztimeout!(sub1.recv_async()); - assert!(sample.is_ok()); - let key_expr = sample.as_ref().unwrap().key_expr.as_str(); - assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); - assert!(sample.as_ref().unwrap().kind == SampleKind::Put); + let sample = ztimeout!(sub1.recv_async()); + assert!(sample.is_ok()); + let key_expr = sample.as_ref().unwrap().key_expr.as_str(); + assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); + assert!(sample.as_ref().unwrap().kind == SampleKind::Put); - let sample = 
ztimeout!(sub2.recv_async()); - assert!(sample.is_ok()); - let key_expr = sample.as_ref().unwrap().key_expr.as_str(); - assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); - assert!(sample.as_ref().unwrap().kind == SampleKind::Put); + let sample = ztimeout!(sub2.recv_async()); + assert!(sample.is_ok()); + let key_expr = sample.as_ref().unwrap().key_expr.as_str(); + assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); + assert!(sample.as_ref().unwrap().kind == SampleKind::Put); - let replies: Vec = ztimeout!(session - .get(format!("@/session/{zid}/transport/unicast/*")) - .res_async()) - .unwrap() - .into_iter() - .collect(); - assert!(replies.len() == 1); - assert!(replies[0].sample.is_ok()); - let key_expr = replies[0].sample.as_ref().unwrap().key_expr.as_str(); - assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); + let replies: Vec = ztimeout!(session + .get(format!("@/session/{zid}/transport/unicast/*")) + .res_async()) + .unwrap() + .into_iter() + .collect(); + assert!(replies.len() == 1); + assert!(replies[0].sample.is_ok()); + let key_expr = replies[0].sample.as_ref().unwrap().key_expr.as_str(); + assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); - let replies: Vec = ztimeout!(session - .get(format!("@/session/{zid}/transport/unicast/*/link/*")) - .res_async()) - .unwrap() - .into_iter() - .collect(); - assert!(replies.len() == 1); - assert!(replies[0].sample.is_ok()); - let key_expr = replies[0].sample.as_ref().unwrap().key_expr.as_str(); - assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); + let replies: Vec = ztimeout!(session + .get(format!("@/session/{zid}/transport/unicast/*/link/*")) + .res_async()) + .unwrap() + .into_iter() + .collect(); + assert!(replies.len() == 1); + assert!(replies[0].sample.is_ok()); + let key_expr = replies[0].sample.as_ref().unwrap().key_expr.as_str(); + 
assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); - close_session(session2).await; + close_session(session2).await; - let sample = ztimeout!(sub1.recv_async()); - assert!(sample.is_ok()); - let key_expr = sample.as_ref().unwrap().key_expr.as_str(); - assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); - assert!(sample.as_ref().unwrap().kind == SampleKind::Delete); + let sample = ztimeout!(sub1.recv_async()); + assert!(sample.is_ok()); + let key_expr = sample.as_ref().unwrap().key_expr.as_str(); + assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); + assert!(sample.as_ref().unwrap().kind == SampleKind::Delete); - let sample = ztimeout!(sub2.recv_async()); - assert!(sample.is_ok()); - let key_expr = sample.as_ref().unwrap().key_expr.as_str(); - assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); - assert!(sample.as_ref().unwrap().kind == SampleKind::Delete); + let sample = ztimeout!(sub2.recv_async()); + assert!(sample.is_ok()); + let key_expr = sample.as_ref().unwrap().key_expr.as_str(); + assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); + assert!(sample.as_ref().unwrap().kind == SampleKind::Delete); - sub2.undeclare().res().await.unwrap(); - sub1.undeclare().res().await.unwrap(); - close_session(session).await; - }); + sub2.undeclare().res().await.unwrap(); + sub1.undeclare().res().await.unwrap(); + close_session(session).await; } diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 2a5c30e7b8..073d85566b 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -30,6 +30,7 @@ impl IntervalCounter { } fn get_middle(&self) -> u32 { + assert!(self.count > 0); self.total_time.as_millis() as u32 / self.count } @@ -68,6 +69,14 @@ fn downsampling_by_keyexpr_impl(egress: bool) { if !egress { config_sub.insert_json5("downsampling", &ds_cfg).unwrap(); } + 
config_sub + .insert_json5("listen/endpoints", r#"["tcp/127.0.0.1:38446"]"#) + .unwrap(); + config_sub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); let zenoh_sub = zenoh::open(config_sub).res().unwrap(); let counter_r100 = Arc::new(Mutex::new(IntervalCounter::new())); @@ -97,6 +106,14 @@ fn downsampling_by_keyexpr_impl(egress: bool) { if egress { config_pub.insert_json5("downsampling", &ds_cfg).unwrap(); } + config_pub + .insert_json5("connect/endpoints", r#"["tcp/127.0.0.1:38446"]"#) + .unwrap(); + config_pub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); let zenoh_pub = zenoh::open(config_pub).res().unwrap(); let publisher_r100 = zenoh_pub .declare_publisher("test/downsamples_by_keyexp/r100") @@ -113,13 +130,13 @@ fn downsampling_by_keyexpr_impl(egress: bool) { .res() .unwrap(); - let interval = std::time::Duration::from_millis(1); + // WARN(yuyuan): 2 ms is the limit of tokio + let interval = std::time::Duration::from_millis(2); let messages_count = 1000; for i in 0..messages_count { publisher_r100.put(format!("message {}", i)).res().unwrap(); publisher_r50.put(format!("message {}", i)).res().unwrap(); publisher_all.put(format!("message {}", i)).res().unwrap(); - std::thread::sleep(interval); } @@ -173,7 +190,7 @@ fn downsampling_by_interface_impl(egress: bool) { // declare subscriber let mut config_sub = Config::default(); config_sub - .insert_json5("listen/endpoints", r#"["tcp/127.0.0.1:7447"]"#) + .insert_json5("listen/endpoints", r#"["tcp/127.0.0.1:38447"]"#) .unwrap(); if !egress { config_sub.insert_json5("downsampling", &ds_cfg).unwrap(); @@ -201,7 +218,7 @@ fn downsampling_by_interface_impl(egress: bool) { // declare publisher let mut config_pub = Config::default(); config_pub - .insert_json5("connect/endpoints", r#"["tcp/127.0.0.1:7447"]"#) + .insert_json5("connect/endpoints", r#"["tcp/127.0.0.1:38447"]"#) .unwrap(); if egress { config_pub.insert_json5("downsampling", &ds_cfg).unwrap(); @@ -217,7 +234,8 @@ fn 
downsampling_by_interface_impl(egress: bool) { .res() .unwrap(); - let interval = std::time::Duration::from_millis(1); + // WARN(yuyuan): 2 ms is the limit of tokio + let interval = std::time::Duration::from_millis(2); let messages_count = 1000; for i in 0..messages_count { publisher_r100.put(format!("message {}", i)).res().unwrap(); diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 96cca533df..b4b138d78f 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -11,87 +11,68 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::prelude::FutureExt; -use async_std::task; use std::time::Duration; use zenoh::prelude::r#async::*; -use zenoh_core::zasync_executor_init; +use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); -macro_rules! ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; -} - #[cfg(feature = "unstable")] -#[test] -fn zenoh_liveliness() { - task::block_on(async { - zasync_executor_init!(); - - let mut c1 = config::peer(); - c1.listen - .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()]) - .unwrap(); - c1.scouting.multicast.set_enabled(Some(false)).unwrap(); - let session1 = ztimeout!(zenoh::open(c1).res_async()).unwrap(); - let mut c2 = config::peer(); - c2.connect - .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()]) - .unwrap(); - c2.scouting.multicast.set_enabled(Some(false)).unwrap(); - let session2 = ztimeout!(zenoh::open(c2).res_async()).unwrap(); - - let replies = ztimeout!(session2 - .liveliness() - .get("zenoh_liveliness_test") - .res_async()) +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn zenoh_liveliness() { + let mut c1 = config::peer(); + c1.listen + .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()]) .unwrap(); - assert!(replies.into_iter().count() == 0); - - let sub = ztimeout!(session2 - .liveliness() - .declare_subscriber("zenoh_liveliness_test") - 
.res_async()) + c1.scouting.multicast.set_enabled(Some(false)).unwrap(); + let session1 = ztimeout!(zenoh::open(c1).res_async()).unwrap(); + let mut c2 = config::peer(); + c2.connect + .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()]) .unwrap(); + c2.scouting.multicast.set_enabled(Some(false)).unwrap(); + let session2 = ztimeout!(zenoh::open(c2).res_async()).unwrap(); - let token = ztimeout!(session1 - .liveliness() - .declare_token("zenoh_liveliness_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session2 + .liveliness() + .declare_subscriber("zenoh_liveliness_test") + .res_async()) + .unwrap(); - task::sleep(SLEEP).await; + let token = ztimeout!(session1 + .liveliness() + .declare_token("zenoh_liveliness_test") + .res_async()) + .unwrap(); - let replies = ztimeout!(session2 - .liveliness() - .get("zenoh_liveliness_test") - .res_async()) - .unwrap(); - let sample = ztimeout!(replies.recv_async()).unwrap().sample.unwrap(); - assert!(sample.kind == SampleKind::Put); - assert!(sample.key_expr.as_str() == "zenoh_liveliness_test"); + tokio::time::sleep(SLEEP).await; - assert!(ztimeout!(replies.recv_async()).is_err()); + let replies = ztimeout!(session2 + .liveliness() + .get("zenoh_liveliness_test") + .res_async()) + .unwrap(); + let sample = ztimeout!(replies.recv_async()).unwrap().sample.unwrap(); + assert!(sample.kind == SampleKind::Put); + assert!(sample.key_expr.as_str() == "zenoh_liveliness_test"); - let sample = ztimeout!(sub.recv_async()).unwrap(); - assert!(sample.kind == SampleKind::Put); - assert!(sample.key_expr.as_str() == "zenoh_liveliness_test"); + assert!(ztimeout!(replies.recv_async()).is_err()); - drop(token); + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert!(sample.kind == SampleKind::Put); + assert!(sample.key_expr.as_str() == "zenoh_liveliness_test"); - task::sleep(SLEEP).await; + drop(token); - let replies = ztimeout!(session2 - .liveliness() - .get("zenoh_liveliness_test") - .res_async()) - .unwrap(); - 
assert!(ztimeout!(replies.recv_async()).is_err()); + tokio::time::sleep(SLEEP).await; + + let replies = ztimeout!(session2 + .liveliness() + .get("zenoh_liveliness_test") + .res_async()) + .unwrap(); + assert!(ztimeout!(replies.recv_async()).is_err()); - assert!(replies.try_recv().is_err()); - }); + assert!(replies.try_recv().is_err()); } diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index f36bf5481b..e56036f5de 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -11,23 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::prelude::FutureExt; -use async_std::task; use std::str::FromStr; use std::time::Duration; use zenoh::prelude::r#async::*; -use zenoh_core::zasync_executor_init; +use zenoh_core::ztimeout; use zenoh_result::ZResult as Result; const TIMEOUT: Duration = Duration::from_secs(60); const RECV_TIMEOUT: Duration = Duration::from_secs(1); -macro_rules! ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; -} - #[cfg(feature = "unstable")] async fn create_session_pair(locator: &str) -> (Session, Session) { let config1 = { @@ -47,202 +39,190 @@ async fn create_session_pair(locator: &str) -> (Session, Session) { } #[cfg(feature = "unstable")] -#[test] -fn zenoh_matching_status_any() -> Result<()> { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn zenoh_matching_status_any() -> Result<()> { use flume::RecvTimeoutError; - task::block_on(async { - zasync_executor_init!(); + let (session1, session2) = create_session_pair("tcp/127.0.0.1:18001").await; - let (session1, session2) = create_session_pair("tcp/127.0.0.1:18001").await; + let publisher1 = ztimeout!(session1 + .declare_publisher("zenoh_matching_status_any_test") + .allowed_destination(Locality::Any) + .res_async()) + .unwrap(); - let publisher1 = ztimeout!(session1 - .declare_publisher("zenoh_matching_status_any_test") - .allowed_destination(Locality::Any) - .res_async()) - .unwrap(); + let matching_listener = 
ztimeout!(publisher1.matching_listener().res_async()).unwrap(); - let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); - assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); - assert!(!matching_status.matching_subscribers()); + let sub = ztimeout!(session1 + .declare_subscriber("zenoh_matching_status_any_test") + .res_async()) + .unwrap(); - let sub = ztimeout!(session1 - .declare_subscriber("zenoh_matching_status_any_test") - .res_async()) - .unwrap(); + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); - let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); - assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(matching_status.matching_subscribers()); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); - assert!(matching_status.matching_subscribers()); + ztimeout!(sub.undeclare().res_async()).unwrap(); - ztimeout!(sub.undeclare().res_async()).unwrap(); + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); - let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); - assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); + let matching_status = 
ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); - assert!(!matching_status.matching_subscribers()); + let sub = ztimeout!(session2 + .declare_subscriber("zenoh_matching_status_any_test") + .res_async()) + .unwrap(); - let sub = ztimeout!(session2 - .declare_subscriber("zenoh_matching_status_any_test") - .res_async()) - .unwrap(); + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); - let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); - assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(matching_status.matching_subscribers()); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); - assert!(matching_status.matching_subscribers()); + ztimeout!(sub.undeclare().res_async()).unwrap(); - ztimeout!(sub.undeclare().res_async()).unwrap(); + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); - let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); - assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); - - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); - assert!(!matching_status.matching_subscribers()); - Ok(()) - }) + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); + Ok(()) } #[cfg(feature = "unstable")] -#[test] -fn zenoh_matching_status_remote() -> Result<()> { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn zenoh_matching_status_remote() -> Result<()> { use 
flume::RecvTimeoutError; - task::block_on(async { - zasync_executor_init!(); - - let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); - let session2 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + let session2 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); - let publisher1 = ztimeout!(session1 - .declare_publisher("zenoh_matching_status_remote_test") - .allowed_destination(Locality::Remote) - .res_async()) - .unwrap(); + let publisher1 = ztimeout!(session1 + .declare_publisher("zenoh_matching_status_remote_test") + .allowed_destination(Locality::Remote) + .res_async()) + .unwrap(); - let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); + let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); - let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); - assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); - assert!(!matching_status.matching_subscribers()); + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session1 - .declare_subscriber("zenoh_matching_status_remote_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session1 + .declare_subscriber("zenoh_matching_status_remote_test") + .res_async()) + .unwrap(); - let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); - assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = 
ztimeout!(publisher1.matching_status().res_async()).unwrap(); - assert!(!matching_status.matching_subscribers()); + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare().res_async()).unwrap(); - let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); - assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); - assert!(!matching_status.matching_subscribers()); + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session2 - .declare_subscriber("zenoh_matching_status_remote_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session2 + .declare_subscriber("zenoh_matching_status_remote_test") + .res_async()) + .unwrap(); - let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); - assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); - assert!(matching_status.matching_subscribers()); + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare().res_async()).unwrap(); - let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); - assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); + let received_status = 
matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); - assert!(!matching_status.matching_subscribers()); + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); - Ok(()) - }) + Ok(()) } #[cfg(feature = "unstable")] -#[test] -fn zenoh_matching_status_local() -> Result<()> { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn zenoh_matching_status_local() -> Result<()> { use flume::RecvTimeoutError; - task::block_on(async { - zasync_executor_init!(); - - let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); - let session2 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + let session2 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); - let publisher1 = ztimeout!(session1 - .declare_publisher("zenoh_matching_status_local_test") - .allowed_destination(Locality::SessionLocal) - .res_async()) - .unwrap(); + let publisher1 = ztimeout!(session1 + .declare_publisher("zenoh_matching_status_local_test") + .allowed_destination(Locality::SessionLocal) + .res_async()) + .unwrap(); - let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); + let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); - let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); - assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); - assert!(!matching_status.matching_subscribers()); + let matching_status = 
ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session1 - .declare_subscriber("zenoh_matching_status_local_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session1 + .declare_subscriber("zenoh_matching_status_local_test") + .res_async()) + .unwrap(); - let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); - assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); - assert!(matching_status.matching_subscribers()); + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare().res_async()).unwrap(); - let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); - assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); - assert!(!matching_status.matching_subscribers()); + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session2 - .declare_subscriber("zenoh_matching_status_local_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session2 + .declare_subscriber("zenoh_matching_status_local_test") + .res_async()) + .unwrap(); - let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); - assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); + let 
received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); - assert!(!matching_status.matching_subscribers()); + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare().res_async()).unwrap(); - let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); - assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); + let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); + assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); - assert!(!matching_status.matching_subscribers()); + let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + assert!(!matching_status.matching_subscribers()); - Ok(()) - }) + Ok(()) } diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 475d8d7a1b..0e28af0847 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -11,56 +11,45 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::prelude::FutureExt; -use async_std::task; use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::{publication::Priority, SessionDeclarations}; -use zenoh_core::zasync_executor_init; +use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); -macro_rules! 
ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; -} - -#[test] -fn pubsub() { - task::block_on(async { - zasync_executor_init!(); - let session1 = ztimeout!(zenoh::open(zenoh_config::peer()).res_async()).unwrap(); - let session2 = ztimeout!(zenoh::open(zenoh_config::peer()).res_async()).unwrap(); +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn pubsub() { + let session1 = ztimeout!(zenoh::open(zenoh_config::peer()).res_async()).unwrap(); + let session2 = ztimeout!(zenoh::open(zenoh_config::peer()).res_async()).unwrap(); - let publisher1 = ztimeout!(session1 - .declare_publisher("test/qos") - .priority(Priority::DataHigh) - .congestion_control(CongestionControl::Drop) - .res()) - .unwrap(); + let publisher1 = ztimeout!(session1 + .declare_publisher("test/qos") + .priority(Priority::DataHigh) + .congestion_control(CongestionControl::Drop) + .res()) + .unwrap(); - let publisher2 = ztimeout!(session1 - .declare_publisher("test/qos") - .priority(Priority::DataLow) - .congestion_control(CongestionControl::Block) - .res()) - .unwrap(); + let publisher2 = ztimeout!(session1 + .declare_publisher("test/qos") + .priority(Priority::DataLow) + .congestion_control(CongestionControl::Block) + .res()) + .unwrap(); - let subscriber = ztimeout!(session2.declare_subscriber("test/qos").res()).unwrap(); - task::sleep(SLEEP).await; + let subscriber = ztimeout!(session2.declare_subscriber("test/qos").res()).unwrap(); + tokio::time::sleep(SLEEP).await; - ztimeout!(publisher1.put("qos").res_async()).unwrap(); - let qos = ztimeout!(subscriber.recv_async()).unwrap().qos; + ztimeout!(publisher1.put("qos").res_async()).unwrap(); + let qos = ztimeout!(subscriber.recv_async()).unwrap().qos; - assert_eq!(qos.priority(), Priority::DataHigh); - assert_eq!(qos.congestion_control(), CongestionControl::Drop); + assert_eq!(qos.priority(), Priority::DataHigh); + assert_eq!(qos.congestion_control(), CongestionControl::Drop); - 
ztimeout!(publisher2.put("qos").res_async()).unwrap(); - let qos = ztimeout!(subscriber.recv_async()).unwrap().qos; + ztimeout!(publisher2.put("qos").res_async()).unwrap(); + let qos = ztimeout!(subscriber.recv_async()).unwrap().qos; - assert_eq!(qos.priority(), Priority::DataLow); - assert_eq!(qos.congestion_control(), CongestionControl::Block); - }); + assert_eq!(qos.priority(), Priority::DataLow); + assert_eq!(qos.congestion_control(), CongestionControl::Block); } diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 3b10f12f03..6c5afe0673 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -11,29 +11,27 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::prelude::FutureExt; -use futures::future::try_join_all; -use futures::FutureExt as _; -use std::str::FromStr; -use std::sync::atomic::Ordering; -use std::sync::{atomic::AtomicUsize, Arc}; -use std::time::Duration; -use zenoh::config::{Config, ModeDependentValue}; -use zenoh::prelude::r#async::*; -use zenoh::{value::Value, Result}; -use zenoh_core::zasync_executor_init; +use std::{ + str::FromStr, + sync::{atomic::AtomicUsize, atomic::Ordering, Arc}, + time::Duration, +}; +use tokio_util::{sync::CancellationToken, task::TaskTracker}; +use zenoh::{ + config::{Config, ModeDependentValue}, + prelude::r#async::*, + value::Value, + Result, +}; +use zenoh_core::ztimeout; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; -use zenoh_result::{bail, zerror}; +use zenoh_result::bail; -const TIMEOUT: Duration = Duration::from_secs(360); +const TIMEOUT: Duration = Duration::from_secs(10); const MSG_COUNT: usize = 50; const MSG_SIZE: [usize; 2] = [1_024, 131_072]; - -macro_rules! ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await? 
- }; -} +// Maximal recipes to run at once +const PARALLEL_RECIPES: usize = 4; #[derive(Debug, Clone, PartialEq, Eq)] enum Task { @@ -51,33 +49,50 @@ impl Task { &self, session: Arc, remaining_checkpoints: Arc, + token: CancellationToken, ) -> Result<()> { match self { // The Sub task checks if the incoming message matches the expected size until it receives enough counts. Self::Sub(ke, expected_size) => { let sub = ztimeout!(session.declare_subscriber(ke).res_async())?; let mut counter = 0; - while let Ok(sample) = sub.recv_async().await { - let recv_size = sample.value.payload.len(); - if recv_size != *expected_size { - bail!("Received payload size {recv_size} mismatches the expected {expected_size}"); - } - counter += 1; - if counter >= MSG_COUNT { - println!("Sub received sufficient amount of messages. Done."); - break; + loop { + tokio::select! { + _ = token.cancelled() => break, + res = sub.recv_async() => { + if let Ok(sample) = res { + let recv_size = sample.value.payload.len(); + if recv_size != *expected_size { + bail!("Received payload size {recv_size} mismatches the expected {expected_size}"); + } + counter += 1; + if counter >= MSG_COUNT { + println!("Sub received sufficient amount of messages. Done."); + break; + } + } + } } } + println!("Sub task done."); } // The Pub task keeps putting messages until all checkpoints are finished. Self::Pub(ke, payload_size) => { let value: Value = vec![0u8; *payload_size].into(); - while remaining_checkpoints.load(Ordering::Relaxed) > 0 { - ztimeout!(session - .put(ke, value.clone()) - .congestion_control(CongestionControl::Block) - .res_async())?; + // while remaining_checkpoints.load(Ordering::Relaxed) > 0 { + loop { + tokio::select! 
{ + _ = token.cancelled() => break, + + // WARN: this won't yield after a timeout since the put is a blocking call + res = tokio::time::timeout(std::time::Duration::from_secs(1), session + .put(ke, value.clone()) + .congestion_control(CongestionControl::Block) + .res()) => { + let _ = res?; + } + } } println!("Pub task done."); } @@ -86,27 +101,31 @@ impl Task { Self::Get(ke, expected_size) => { let mut counter = 0; while counter < MSG_COUNT { - let replies = - ztimeout!(session.get(ke).timeout(Duration::from_secs(10)).res_async())?; - while let Ok(reply) = replies.recv_async().await { - match reply.sample { - Ok(sample) => { - let recv_size = sample.value.payload.len(); - if recv_size != *expected_size { - bail!("Received payload size {recv_size} mismatches the expected {expected_size}"); + tokio::select! { + _ = token.cancelled() => break, + replies = session.get(ke).timeout(Duration::from_secs(10)).res() => { + let replies = replies?; + while let Ok(reply) = replies.recv_async().await { + match reply.sample { + Ok(sample) => { + let recv_size = sample.value.payload.len(); + if recv_size != *expected_size { + bail!("Received payload size {recv_size} mismatches the expected {expected_size}"); + } + } + + Err(err) => { + log::warn!( + "Sample got from {} failed to unwrap! Error: {}.", + ke, + err + ); + continue; + } } - } - - Err(err) => { - log::warn!( - "Sample got from {} failed to unwrap! Error: {}.", - ke, - err - ); - continue; + counter += 1; } } - counter += 1; } } println!("Get got sufficient amount of messages. Done."); @@ -118,16 +137,11 @@ impl Task { let sample = Sample::try_from(ke.clone(), vec![0u8; *payload_size])?; loop { - futures::select! { + tokio::select! 
{ + _ = token.cancelled() => break, query = queryable.recv_async() => { query?.reply(Ok(sample.clone())).res_async().await?; }, - - _ = async_std::task::sleep(Duration::from_millis(100)).fuse() => { - if remaining_checkpoints.load(Ordering::Relaxed) == 0 { - break; - } - } } } println!("Queryable task done."); @@ -135,24 +149,23 @@ impl Task { // Make the zenoh session sleep for a while. Self::Sleep(dur) => { - async_std::task::sleep(*dur).await; + tokio::time::sleep(*dur).await; } // Mark one checkpoint is finished. Self::Checkpoint => { if remaining_checkpoints.fetch_sub(1, Ordering::Relaxed) <= 1 { + token.cancel(); println!("The end of the recipe."); } } // Wait until all checkpoints are done Self::Wait => { - while remaining_checkpoints.load(Ordering::Relaxed) > 0 { - async_std::task::sleep(Duration::from_millis(100)).await; - } + token.cancelled().await; } } - Result::Ok(()) + Ok(()) } } @@ -205,6 +218,7 @@ impl Default for Node { #[derive(Debug, Clone)] struct Recipe { nodes: Vec, + token: CancellationToken, } // Display the Recipe as [NodeName1, NodeName2, ...] 
@@ -218,7 +232,10 @@ impl std::fmt::Display for Recipe { impl Recipe { fn new(nodes: impl IntoIterator) -> Self { let nodes = nodes.into_iter().collect(); - Self { nodes } + Self { + nodes, + token: CancellationToken::new(), + } } fn num_checkpoints(&self) -> usize { @@ -229,66 +246,79 @@ impl Recipe { let num_checkpoints = self.num_checkpoints(); let remaining_checkpoints = Arc::new(AtomicUsize::new(num_checkpoints)); println!( - "Recipe {} beging testing with {} checkpoint(s).", + "Recipe {} begin testing with {} checkpoint(s).", &self, &num_checkpoints ); + let mut recipe_join_set = tokio::task::JoinSet::new(); + // All concurrent tasks to run - let futures = self.nodes.clone().into_iter().map(move |node| { - let receipe_name = self.to_string(); + for node in self.nodes.clone() { // All nodes share the same checkpoint counter let remaining_checkpoints = remaining_checkpoints.clone(); + let token = self.token.clone(); + + let recipe_task = async move { + // Initiate + let session = { + // Load the config and build up a session + let config = { + let mut config = node.config.unwrap_or_default(); + config.set_mode(Some(node.mode)).unwrap(); + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config + .listen + .set_endpoints(node.listen.iter().map(|x| x.parse().unwrap()).collect()) + .unwrap(); + config + .connect + .set_endpoints( + node.connect.iter().map(|x| x.parse().unwrap()).collect(), + ) + .unwrap(); + config + }; - async move { - // Load the config and build up a session - let config = { - let mut config = node.config.unwrap_or_default(); - config.set_mode(Some(node.mode)).unwrap(); - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - config - .listen - .set_endpoints(node.listen.iter().map(|x| x.parse().unwrap()).collect()) - .unwrap(); - config - .connect - .set_endpoints(node.connect.iter().map(|x| x.parse().unwrap()).collect()) - .unwrap(); - config - }; - - // Warmup before the session starts - 
async_std::task::sleep(node.warmup).await; - println!("Node: {} starting...", &node.name); + // Warmup before the session starts + tokio::time::sleep(node.warmup).await; + println!("Node: {} starting...", &node.name); - // In case of client can't connect to some peers/routers - let session = loop { - if let Ok(session) = zenoh::open(config.clone()).res_async().await { - break session.into_arc(); - } else { - async_std::task::sleep(Duration::from_secs(1)).await; + // In case of client can't connect to some peers/routers + loop { + if let Ok(session) = zenoh::open(config.clone()).res_async().await { + break session.into_arc(); + } else { + tokio::time::sleep(Duration::from_secs(1)).await; + } } }; - // Each node consists of a specified session associated with tasks to run - let node_tasks = node.con_task.into_iter().map(|seq_tasks| { + let mut node_join_set = tokio::task::JoinSet::new(); + for seq_tasks in node.con_task.into_iter() { + let token = token.clone(); + // The tasks share the same session and checkpoint counter let session = session.clone(); let remaining_checkpoints = remaining_checkpoints.clone(); - - async_std::task::spawn(async move { + node_join_set.spawn(async move { // Tasks in seq_tasks would execute serially - for t in seq_tasks { - t.run(session.clone(), remaining_checkpoints.clone()) - .await?; + for task in seq_tasks { + task.run( + session.clone(), + remaining_checkpoints.clone(), + token.clone(), + ) + .await?; } Result::Ok(()) - }) - }); + }); + } - // All tasks of the node run together - try_join_all(node_tasks.into_iter().map(async_std::task::spawn)) - .await - .map_err(|e| zerror!("The recipe {} failed due to {}", receipe_name, &e))?; + while let Some(res) = node_join_set.join_next().await { + res??; + } + // node_task_tracker.close(); + // node_task_tracker.wait().await; // Close the session once all the task assoicated with the node are done. 
Arc::try_unwrap(session) @@ -299,313 +329,218 @@ impl Recipe { println!("Node: {} is closed.", &node.name); Result::Ok(()) - } - }); + }; + recipe_join_set.spawn(recipe_task); + } // All tasks of the recipe run together - try_join_all(futures.into_iter().map(async_std::task::spawn)) - .timeout(TIMEOUT) - .await - .map_err(|e| format!("The recipe: {} failed due to {}", &self, e))??; + loop { + tokio::select! { + _ = tokio::time::sleep(TIMEOUT) => { + dbg!("Timeout"); + + // Termination + remaining_checkpoints.swap(0, Ordering::Relaxed); + self.token.cancel(); + while let Some(res) = recipe_join_set.join_next().await { + res??; + } + bail!("Timeout"); + }, + res = recipe_join_set.join_next() => { + if let Some(res) = res { + res??; + } else { + break + } + } + } + } + Ok(()) } } // Two peers connecting to a common node (either in router or peer mode) can discover each other. // And the message transmission should work even if the common node disappears after a while. -#[test] -fn gossip() -> Result<()> { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn gossip() -> Result<()> { env_logger::try_init().unwrap_or_default(); - async_std::task::block_on(async { - zasync_executor_init!(); - - let locator = String::from("tcp/127.0.0.1:17446"); - let ke = String::from("testKeyExprGossip"); - let msg_size = 8; - let peer1 = Node { - name: format!("Pub & Queryable {}", WhatAmI::Peer), - connect: vec![locator.clone()], - mode: WhatAmI::Peer, - con_task: ConcurrentTask::from([ - SequentialTask::from([ - Task::Sleep(Duration::from_millis(2000)), - Task::Pub(ke.clone(), msg_size), - ]), - SequentialTask::from([ - Task::Sleep(Duration::from_millis(2000)), - Task::Queryable(ke.clone(), msg_size), - ]), + let locator = String::from("tcp/127.0.0.1:17446"); + let ke = String::from("testKeyExprGossip"); + let msg_size = 8; + + // node1 in peer mode playing pub and queryable + let node1 = Node { + name: format!("Pub & Queryable {}", WhatAmI::Peer), + connect: 
vec![locator.clone()], + mode: WhatAmI::Peer, + con_task: ConcurrentTask::from([ + SequentialTask::from([ + Task::Sleep(Duration::from_millis(2000)), + Task::Pub(ke.clone(), msg_size), ]), - ..Default::default() - }; - let peer2 = Node { - name: format!("Sub & Get {}", WhatAmI::Peer), - mode: WhatAmI::Peer, - connect: vec![locator.clone()], - con_task: ConcurrentTask::from([ - SequentialTask::from([ - Task::Sleep(Duration::from_millis(2000)), - Task::Sub(ke.clone(), msg_size), - Task::Checkpoint, - ]), - SequentialTask::from([ - Task::Sleep(Duration::from_millis(2000)), - Task::Get(ke, msg_size), - Task::Checkpoint, - ]), + SequentialTask::from([ + Task::Sleep(Duration::from_millis(2000)), + Task::Queryable(ke.clone(), msg_size), + ]), + ]), + ..Default::default() + }; + // node2 in peer mode playing sub and get + let node2 = Node { + name: format!("Sub & Get {}", WhatAmI::Peer), + mode: WhatAmI::Peer, + connect: vec![locator.clone()], + con_task: ConcurrentTask::from([ + SequentialTask::from([ + Task::Sleep(Duration::from_millis(2000)), + Task::Sub(ke.clone(), msg_size), + Task::Checkpoint, + ]), + SequentialTask::from([ + Task::Sleep(Duration::from_millis(2000)), + Task::Get(ke, msg_size), + Task::Checkpoint, ]), + ]), + ..Default::default() + }; + + // Recipes: + // - node1: Peer, node2: Peer, node3: Peer + // - node1: Peer, node2: Peer, node3: Router + for mode in [WhatAmI::Peer, WhatAmI::Router] { + let node3 = Node { + name: format!("Router {}", mode), + mode: WhatAmI::Peer, + listen: vec![locator.clone()], + con_task: ConcurrentTask::from([SequentialTask::from([Task::Sleep( + Duration::from_millis(1000), + )])]), ..Default::default() }; - - for mode in [WhatAmI::Peer, WhatAmI::Router] { - Recipe::new([ - Node { - name: format!("Router {}", mode), - mode: WhatAmI::Peer, - listen: vec![locator.clone()], - con_task: ConcurrentTask::from([SequentialTask::from([Task::Sleep( - Duration::from_millis(1000), - )])]), - ..Default::default() - }, - peer1.clone(), - 
peer2.clone(), - ]) + Recipe::new([node1.clone(), node2.clone(), node3]) .run() .await?; - } + } - println!("Gossip test passed."); - Result::Ok(()) - })?; - Ok(()) + println!("Gossip test passed."); + Result::Ok(()) } // Simulate two peers connecting to a router but not directly reachable to each other can exchange messages via the brokering by the router. -#[test] -fn static_failover_brokering() -> Result<()> { +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn static_failover_brokering() -> Result<()> { env_logger::try_init().unwrap_or_default(); - async_std::task::block_on(async { - zasync_executor_init!(); - - let locator = String::from("tcp/127.0.0.1:17449"); - let ke = String::from("testKeyExprStaticFailoverBrokering"); - let msg_size = 8; - - let disable_autoconnect_config = || { - let mut config = Config::default(); - config - .scouting - .gossip - .set_autoconnect(Some(ModeDependentValue::Unique( - WhatAmIMatcher::from_str("").unwrap(), - ))) - .unwrap(); - Some(config) - }; + let locator = String::from("tcp/127.0.0.1:17449"); + let ke = String::from("testKeyExprStaticFailoverBrokering"); + let msg_size = 8; + + let disable_autoconnect_config = || { + let mut config = Config::default(); + config + .scouting + .gossip + .set_autoconnect(Some(ModeDependentValue::Unique( + WhatAmIMatcher::from_str("").unwrap(), + ))) + .unwrap(); + Some(config) + }; - let recipe = Recipe::new([ - Node { - name: format!("Router {}", WhatAmI::Router), - mode: WhatAmI::Router, - listen: vec![locator.clone()], - con_task: ConcurrentTask::from([SequentialTask::from([Task::Wait])]), - ..Default::default() - }, - Node { - name: format!("Pub & Queryable {}", WhatAmI::Peer), - mode: WhatAmI::Peer, - connect: vec![locator.clone()], - config: disable_autoconnect_config(), - con_task: ConcurrentTask::from([ - SequentialTask::from([Task::Pub(ke.clone(), msg_size)]), - SequentialTask::from([Task::Queryable(ke.clone(), msg_size)]), - ]), - ..Default::default() - }, - 
Node { - name: format!("Sub & Get {}", WhatAmI::Peer), - mode: WhatAmI::Peer, - connect: vec![locator.clone()], - config: disable_autoconnect_config(), - con_task: ConcurrentTask::from([ - SequentialTask::from([Task::Sub(ke.clone(), msg_size), Task::Checkpoint]), - SequentialTask::from([Task::Get(ke.clone(), msg_size), Task::Checkpoint]), - ]), - ..Default::default() - }, - ]); - recipe.run().await?; - println!("Static failover brokering test passed."); - Result::Ok(()) - })?; - Ok(()) + let recipe = Recipe::new([ + Node { + name: format!("Router {}", WhatAmI::Router), + mode: WhatAmI::Router, + listen: vec![locator.clone()], + con_task: ConcurrentTask::from([SequentialTask::from([Task::Wait])]), + ..Default::default() + }, + Node { + name: format!("Pub & Queryable {}", WhatAmI::Peer), + mode: WhatAmI::Peer, + connect: vec![locator.clone()], + config: disable_autoconnect_config(), + con_task: ConcurrentTask::from([ + SequentialTask::from([Task::Pub(ke.clone(), msg_size)]), + SequentialTask::from([Task::Queryable(ke.clone(), msg_size)]), + ]), + ..Default::default() + }, + Node { + name: format!("Sub & Get {}", WhatAmI::Peer), + mode: WhatAmI::Peer, + connect: vec![locator.clone()], + config: disable_autoconnect_config(), + con_task: ConcurrentTask::from([ + SequentialTask::from([Task::Sub(ke.clone(), msg_size), Task::Checkpoint]), + SequentialTask::from([Task::Get(ke.clone(), msg_size), Task::Checkpoint]), + ]), + ..Default::default() + }, + ]); + recipe.run().await?; + println!("Static failover brokering test passed."); + Result::Ok(()) } // All test cases varying in -// 1. Message size -// 2. Mode: peer or client -// 3. Spawning order -#[test] -fn three_node_combination() -> Result<()> { +// 1. Message size: 2 (sizes) +// 2. Mode: {Client, Peer} x {Client x Peer} x {Router} = 2 x 2 x 1 = 4 (cases) +// 3. 
Spawning order (delay_in_secs for node1, node2, and node3) = 6 (cases) +// +// Total cases = 2 x 4 x 6 = 48 +#[tokio::test(flavor = "multi_thread", worker_threads = 9)] +async fn three_node_combination() -> Result<()> { env_logger::try_init().unwrap_or_default(); - async_std::task::block_on(async { - zasync_executor_init!(); - - let modes = [WhatAmI::Peer, WhatAmI::Client]; - let delay_in_secs = [ - (0, 1, 2), - (0, 2, 1), - (1, 2, 0), - (1, 0, 2), - (2, 0, 1), - (2, 1, 0), - ]; - - let mut idx = 0; - // Ports going to be used: 17451 to 17498 - let base_port = 17450; - - let recipe_list = modes - .map(|n1| modes.map(|n2| (n1, n2))) - .concat() - .into_iter() - .flat_map(|(n1, n2)| MSG_SIZE.map(|s| (n1, n2, s))) - .flat_map(|(n1, n2, s)| delay_in_secs.map(|d| (n1, n2, s, d))) - .map( - |(node1_mode, node2_mode, msg_size, (delay1, delay2, delay3))| { - idx += 1; - let locator = format!("tcp/127.0.0.1:{}", base_port + idx); - - let ke_pubsub = format!("three_node_combination_keyexpr_pubsub_{idx}"); - let ke_getqueryable = - format!("three_node_combination_keyexpr_getqueryable_{idx}"); - - let router_node = Node { - name: format!("Router {}", WhatAmI::Router), - mode: WhatAmI::Router, - listen: vec![locator.clone()], - con_task: ConcurrentTask::from([SequentialTask::from([Task::Wait])]), - warmup: Duration::from_secs(delay1), - ..Default::default() - }; - - let (pub_node, queryable_node) = - { - let base = Node { - mode: node1_mode, - connect: vec![locator.clone()], - warmup: Duration::from_secs(delay2), - ..Default::default() - }; - - let mut pub_node = base.clone(); - pub_node.name = format!("Pub {node1_mode}"); - pub_node.con_task = - ConcurrentTask::from([SequentialTask::from([Task::Pub( - ke_pubsub.clone(), - msg_size, - )])]); - - let mut queryable_node = base; - queryable_node.name = format!("Queryable {node1_mode}"); - queryable_node.con_task = ConcurrentTask::from([SequentialTask::from( - [Task::Queryable(ke_getqueryable.clone(), msg_size)], - )]); - - 
(pub_node, queryable_node) - }; - - let (sub_node, get_node) = { - let base = Node { - mode: node2_mode, - connect: vec![locator], - warmup: Duration::from_secs(delay3), - ..Default::default() - }; - - let mut sub_node = base.clone(); - sub_node.name = format!("Sub {node2_mode}"); - sub_node.con_task = ConcurrentTask::from([SequentialTask::from([ - Task::Sub(ke_pubsub, msg_size), - Task::Checkpoint, - ])]); - - let mut get_node = base; - get_node.name = format!("Get {node2_mode}"); - get_node.con_task = ConcurrentTask::from([SequentialTask::from([ - Task::Get(ke_getqueryable, msg_size), - Task::Checkpoint, - ])]); - - (sub_node, get_node) - }; - - ( - Recipe::new([router_node.clone(), pub_node, sub_node]), - Recipe::new([router_node, queryable_node, get_node]), - ) - }, - ); - - for (pubsub, getqueryable) in recipe_list { - pubsub.run().await?; - getqueryable.run().await?; - } - - println!("Three-node combination test passed."); - Result::Ok(()) - })?; - Ok(()) -} - -// All test cases varying in -// 1. Message size -// 2. 
Mode -#[test] -fn two_node_combination() -> Result<()> { - async_std::task::block_on(async { - zasync_executor_init!(); - - #[derive(Clone, Copy)] - struct IsFirstListen(bool); - - let modes = [ - (WhatAmI::Client, WhatAmI::Peer, IsFirstListen(false)), - (WhatAmI::Peer, WhatAmI::Client, IsFirstListen(true)), - (WhatAmI::Peer, WhatAmI::Peer, IsFirstListen(true)), - (WhatAmI::Peer, WhatAmI::Peer, IsFirstListen(false)), - ]; - - let mut idx = 0; - // Ports going to be used: 17500 to 17508 - let base_port = 17500; - let recipe_list = modes - .into_iter() - .flat_map(|(n1, n2, who)| MSG_SIZE.map(|s| (n1, n2, who, s))) - .map(|(node1_mode, node2_mode, who, msg_size)| { + let modes = [WhatAmI::Peer, WhatAmI::Client]; + let delay_in_secs = [ + (0, 1, 2), + (0, 2, 1), + (1, 2, 0), + (1, 0, 2), + (2, 0, 1), + (2, 1, 0), + ]; + + let mut idx = 0; + // Ports going to be used: 17451 to 17498 + let base_port = 17450; + + let recipe_list: Vec<_> = modes + .map(|n1| modes.map(|n2| (n1, n2))) + .concat() + .into_iter() + .flat_map(|(n1, n2)| [1024].map(|s| (n1, n2, s))) + .flat_map(|(n1, n2, s)| delay_in_secs.map(|d| (n1, n2, s, d))) + .map( + |(node1_mode, node2_mode, msg_size, (delay1, delay2, delay3))| { idx += 1; - let ke_pubsub = format!("two_node_combination_keyexpr_pubsub_{idx}"); - let ke_getqueryable = format!("two_node_combination_keyexpr_getqueryable_{idx}"); + let locator = format!("tcp/127.0.0.1:{}", base_port + idx); - let (node1_listen_connect, node2_listen_connect) = { - let locator = format!("tcp/127.0.0.1:{}", base_port + idx); - let listen = vec![locator]; - let connect = vec![]; + let ke_pubsub = format!("three_node_combination_keyexpr_pubsub_{idx}"); + let ke_getqueryable = format!("three_node_combination_keyexpr_getqueryable_{idx}"); - if let IsFirstListen(true) = who { - ((listen.clone(), connect.clone()), (connect, listen)) - } else { - ((connect.clone(), listen.clone()), (listen, connect)) - } + use rand::Rng; + let mut rng = rand::thread_rng(); + + let 
router_node = Node { + name: format!("Router {}", WhatAmI::Router), + mode: WhatAmI::Router, + listen: vec![locator.clone()], + con_task: ConcurrentTask::from([SequentialTask::from([Task::Wait])]), + warmup: Duration::from_secs(delay1) + + Duration::from_millis(rng.gen_range(0..500)), + ..Default::default() }; let (pub_node, queryable_node) = { let base = Node { mode: node1_mode, - listen: node1_listen_connect.0, - connect: node1_listen_connect.1, + connect: vec![locator.clone()], + warmup: Duration::from_secs(delay2), ..Default::default() }; @@ -615,6 +550,7 @@ fn two_node_combination() -> Result<()> { ke_pubsub.clone(), msg_size, )])]); + pub_node.warmup += Duration::from_millis(rng.gen_range(0..500)); let mut queryable_node = base; queryable_node.name = format!("Queryable {node1_mode}"); @@ -623,6 +559,7 @@ fn two_node_combination() -> Result<()> { ke_getqueryable.clone(), msg_size, )])]); + queryable_node.warmup += Duration::from_millis(rng.gen_range(0..500)); (pub_node, queryable_node) }; @@ -630,8 +567,8 @@ fn two_node_combination() -> Result<()> { let (sub_node, get_node) = { let base = Node { mode: node2_mode, - listen: node2_listen_connect.0, - connect: node2_listen_connect.1, + connect: vec![locator], + warmup: Duration::from_secs(delay3), ..Default::default() }; @@ -641,6 +578,7 @@ fn two_node_combination() -> Result<()> { Task::Sub(ke_pubsub, msg_size), Task::Checkpoint, ])]); + sub_node.warmup += Duration::from_millis(rng.gen_range(0..500)); let mut get_node = base; get_node.name = format!("Get {node2_mode}"); @@ -648,23 +586,151 @@ fn two_node_combination() -> Result<()> { Task::Get(ke_getqueryable, msg_size), Task::Checkpoint, ])]); + get_node.warmup += Duration::from_millis(rng.gen_range(0..500)); (sub_node, get_node) }; ( - Recipe::new([pub_node, sub_node]), - Recipe::new([queryable_node, get_node]), + Recipe::new([router_node.clone(), pub_node, sub_node]), + Recipe::new([router_node, queryable_node, get_node]), ) + }, + ) + .collect(); + + for 
chunks in recipe_list.chunks(4).map(|x| x.to_vec()) { + let mut join_set = tokio::task::JoinSet::new(); + for (pubsub, getqueryable) in chunks { + join_set.spawn(async move { + pubsub.run().await?; + getqueryable.run().await?; + Result::Ok(()) }); + } - for (pubsub, getqueryable) in recipe_list { - pubsub.run().await?; - getqueryable.run().await?; + while let Some(res) = join_set.join_next().await { + res??; } + } - println!("Two-node combination test passed."); - Result::Ok(()) - })?; + println!("Three-node combination test passed."); Ok(()) } + +// All test cases varying in +// 1. Message size: 2 (sizes) +// 2. Mode: {Client, Peer} x {Client, Peer} x {IsFirstListen} = 2 x 2 x 2 = 8 (modes) +// +// Total cases = 2 x 8 = 16 +#[tokio::test(flavor = "multi_thread", worker_threads = 8)] +async fn two_node_combination() -> Result<()> { + env_logger::try_init().unwrap_or_default(); + + #[derive(Clone, Copy)] + struct IsFirstListen(bool); + + let modes = [ + (WhatAmI::Client, WhatAmI::Peer, IsFirstListen(false)), + (WhatAmI::Peer, WhatAmI::Client, IsFirstListen(true)), + (WhatAmI::Peer, WhatAmI::Peer, IsFirstListen(true)), + (WhatAmI::Peer, WhatAmI::Peer, IsFirstListen(false)), + ]; + + let mut idx = 0; + // Ports going to be used: 17500 to 17508 + let base_port = 17500; + let recipe_list: Vec<_> = modes + .into_iter() + .flat_map(|(n1, n2, who)| MSG_SIZE.map(|s| (n1, n2, who, s))) + .map(|(node1_mode, node2_mode, who, msg_size)| { + idx += 1; + let ke_pubsub = format!("two_node_combination_keyexpr_pubsub_{idx}"); + let ke_getqueryable = format!("two_node_combination_keyexpr_getqueryable_{idx}"); + + let (node1_listen_connect, node2_listen_connect) = { + let locator = format!("tcp/127.0.0.1:{}", base_port + idx); + let listen = vec![locator]; + let connect = vec![]; + + if let IsFirstListen(true) = who { + ((listen.clone(), connect.clone()), (connect, listen)) + } else { + ((connect.clone(), listen.clone()), (listen, connect)) + } + }; + + let (pub_node, queryable_node) 
= { + let base = Node { + mode: node1_mode, + listen: node1_listen_connect.0, + connect: node1_listen_connect.1, + ..Default::default() + }; + + let mut pub_node = base.clone(); + pub_node.name = format!("Pub {node1_mode}"); + pub_node.con_task = ConcurrentTask::from([SequentialTask::from([Task::Pub( + ke_pubsub.clone(), + msg_size, + )])]); + + let mut queryable_node = base; + queryable_node.name = format!("Queryable {node1_mode}"); + queryable_node.con_task = + ConcurrentTask::from([SequentialTask::from([Task::Queryable( + ke_getqueryable.clone(), + msg_size, + )])]); + + (pub_node, queryable_node) + }; + + let (sub_node, get_node) = { + let base = Node { + mode: node2_mode, + listen: node2_listen_connect.0, + connect: node2_listen_connect.1, + ..Default::default() + }; + + let mut sub_node = base.clone(); + sub_node.name = format!("Sub {node2_mode}"); + sub_node.con_task = ConcurrentTask::from([SequentialTask::from([ + Task::Sub(ke_pubsub, msg_size), + Task::Checkpoint, + ])]); + + let mut get_node = base; + get_node.name = format!("Get {node2_mode}"); + get_node.con_task = ConcurrentTask::from([SequentialTask::from([ + Task::Get(ke_getqueryable, msg_size), + Task::Checkpoint, + ])]); + + (sub_node, get_node) + }; + + ( + Recipe::new([pub_node, sub_node]), + Recipe::new([queryable_node, get_node]), + ) + }) + .collect(); + + for chunks in recipe_list.chunks(PARALLEL_RECIPES).map(|x| x.to_vec()) { + let task_tracker = TaskTracker::new(); + for (pubsub, getqueryable) in chunks { + task_tracker.spawn(async move { + pubsub.run().await?; + getqueryable.run().await?; + Result::Ok(()) + }); + } + task_tracker.close(); + task_tracker.wait().await; + } + + println!("Two-node combination test passed."); + Result::Ok(()) +} diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index c2cec7c627..b5f897be4c 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -11,13 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // -use 
async_std::prelude::FutureExt; -use async_std::task; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use zenoh::prelude::r#async::*; -use zenoh_core::zasync_executor_init; +use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); @@ -25,12 +23,6 @@ const SLEEP: Duration = Duration::from_secs(1); const MSG_COUNT: usize = 1_000; const MSG_SIZE: [usize; 2] = [1_024, 100_000]; -macro_rules! ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; -} - async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { // Open the sessions let mut config = config::peer(); @@ -102,7 +94,7 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re .unwrap(); // Wait for the declaration to propagate - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; // Put data println!("[PS][02b] Putting on peer02 session. {MSG_COUNT} msgs of {size} bytes."); @@ -119,7 +111,7 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re let cnt = msgs.load(Ordering::Relaxed); println!("[PS][03b] Received {cnt}/{msg_count}."); if cnt < msg_count { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } else { break; } @@ -127,13 +119,13 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re }); // Wait for the messages to arrive - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; println!("[PS][03b] Unsubscribing on peer01 session"); ztimeout!(sub.undeclare().res_async()).unwrap(); // Wait for the declaration to propagate - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } } @@ -156,13 +148,16 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re .callback(move |sample| { c_msgs.fetch_add(1, Ordering::Relaxed); let rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); - task::block_on(async { 
ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() }); + tokio::task::block_in_place(|| { + tokio::runtime::Handle::current() + .block_on(async { ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() }) + }); }) .res_async()) .unwrap(); // Wait for the declaration to propagate - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; // Get data println!("[QR][02c] Getting on peer02 session. {msg_count} msgs."); @@ -182,32 +177,24 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re ztimeout!(qbl.undeclare().res_async()).unwrap(); // Wait for the declaration to propagate - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } } -#[test] -fn zenoh_session_unicast() { - task::block_on(async { - zasync_executor_init!(); - let _ = env_logger::try_init(); - - let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:17447"]).await; - test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; - test_session_qryrep(&peer01, &peer02, Reliability::Reliable).await; - close_session(peer01, peer02).await; - }); +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn zenoh_session_unicast() { + let _ = env_logger::try_init(); + let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:17447"]).await; + test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; + test_session_qryrep(&peer01, &peer02, Reliability::Reliable).await; + close_session(peer01, peer02).await; } -#[test] -fn zenoh_session_multicast() { - task::block_on(async { - zasync_executor_init!(); - let _ = env_logger::try_init(); - - let (peer01, peer02) = - open_session_multicast("udp/224.0.0.1:17448", "udp/224.0.0.1:17448").await; - test_session_pubsub(&peer01, &peer02, Reliability::BestEffort).await; - close_session(peer01, peer02).await; - }); +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn zenoh_session_multicast() { + let _ = env_logger::try_init(); + let (peer01, peer02) = + 
open_session_multicast("udp/224.0.0.1:17448", "udp/224.0.0.1:17448").await; + test_session_pubsub(&peer01, &peer02, Reliability::BestEffort).await; + close_session(peer01, peer02).await; } diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index b986c92e8f..865121308a 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -11,25 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::prelude::FutureExt; -use async_std::task; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; +use tokio::runtime::Handle; use zenoh::prelude::r#async::*; -use zenoh_core::zasync_executor_init; +use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); const MSG_SIZE: [usize; 2] = [1_024, 100_000]; -macro_rules! ztimeout { - ($f:expr) => { - $f.timeout(TIMEOUT).await.unwrap() - }; -} - async fn open_p2p_sessions() -> (Session, Session, Session) { // Open the sessions let mut config = config::peer(); @@ -133,7 +126,7 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { .unwrap(); // Wait for the declaration to propagate - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; // Put data println!("[PS][03b] Putting on s03 session. {msg_count} msgs of {size} bytes."); @@ -152,14 +145,14 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { println!("[PS][01b] Received {cnt1}/{msg_count}."); println!("[PS][02b] Received {cnt2}/{msg_count}."); if cnt1 < msg_count || cnt2 < msg_count { - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } else { break; } } }); - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; let cnt1 = msgs1.load(Ordering::Relaxed); println!("[QR][01c] Got on s01 session. 
{cnt1}/{msg_count} msgs."); @@ -175,7 +168,7 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { ztimeout!(sub1.undeclare().res_async()).unwrap(); // Wait for the declaration to propagate - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } } @@ -197,7 +190,11 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { .callback(move |sample| { c_msgs1.fetch_add(1, Ordering::Relaxed); let rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); - task::block_on(async { ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() }); + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + }); + }); }) .res_async()) .unwrap(); @@ -210,13 +207,17 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { .callback(move |sample| { c_msgs2.fetch_add(1, Ordering::Relaxed); let rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); - task::block_on(async { ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() }); + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + }); + }); }) .res_async()) .unwrap(); // Wait for the declaration to propagate - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; // Get data println!("[QR][03c] Getting on s03 session. 
{msg_count} msgs."); @@ -244,35 +245,29 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { ztimeout!(qbl2.undeclare().res_async()).unwrap(); // Wait for the declaration to propagate - task::sleep(SLEEP).await; + tokio::time::sleep(SLEEP).await; } } -#[test] -fn zenoh_unicity_p2p() { - task::block_on(async { - zasync_executor_init!(); - let _ = env_logger::try_init(); - - let (s01, s02, s03) = open_p2p_sessions().await; - test_unicity_pubsub(&s01, &s02, &s03).await; - test_unicity_qryrep(&s01, &s02, &s03).await; - close_sessions(s01, s02, s03).await; - }); +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn zenoh_unicity_p2p() { + let _ = env_logger::try_init(); + + let (s01, s02, s03) = open_p2p_sessions().await; + test_unicity_pubsub(&s01, &s02, &s03).await; + test_unicity_qryrep(&s01, &s02, &s03).await; + close_sessions(s01, s02, s03).await; } -#[test] -fn zenoh_unicity_brokered() { - task::block_on(async { - zasync_executor_init!(); - let _ = env_logger::try_init(); - let r = open_router_session().await; +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn zenoh_unicity_brokered() { + let _ = env_logger::try_init(); + let r = open_router_session().await; - let (s01, s02, s03) = open_client_sessions().await; - test_unicity_pubsub(&s01, &s02, &s03).await; - test_unicity_qryrep(&s01, &s02, &s03).await; - close_sessions(s01, s02, s03).await; + let (s01, s02, s03) = open_client_sessions().await; + test_unicity_pubsub(&s01, &s02, &s03).await; + test_unicity_qryrep(&s01, &s02, &s03).await; + close_sessions(s01, s02, s03).await; - close_router_session(r).await; - }); + close_router_session(r).await; } diff --git a/zenohd/Cargo.toml b/zenohd/Cargo.toml index 892d3df21c..81975ae2e7 100644 --- a/zenohd/Cargo.toml +++ b/zenohd/Cargo.toml @@ -30,7 +30,7 @@ default = ["zenoh/default"] shared-memory = ["zenoh/shared-memory"] [dependencies] -async-std = { workspace = true, features = ["attributes"] } +tokio = { 
workspace = true, features = ["rt-multi-thread"] } clap = { workspace = true, features = ["derive"] } env_logger = { workspace = true } futures = { workspace = true } @@ -55,10 +55,10 @@ license-file = ["../LICENSE", "0"] depends = "$auto" maintainer-scripts = ".deb" assets = [ - # binary - ["target/release/zenohd", "/usr/bin/", "755"], - # config - [".service/zenohd.json5", "/etc/zenohd/", "644"], - # service - [".service/zenohd.service", "/lib/systemd/system/zenohd.service", "644"], + # binary + ["target/release/zenohd", "/usr/bin/", "755"], + # config + [".service/zenohd.json5", "/etc/zenohd/", "644"], + # service + [".service/zenohd.service", "/lib/systemd/system/zenohd.service", "644"], ] diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index b0d29ea89b..7204a83612 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::task; use clap::Parser; use futures::future; use git_version::git_version; @@ -107,7 +106,11 @@ fn load_plugin( } fn main() { - task::block_on(async { + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap() + .block_on(async { let mut log_builder = env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("z=info")); #[cfg(feature = "stats")]