diff --git a/.cirrus.yml b/.cirrus.yml index b49c99291eead..0858029d73e44 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -3,19 +3,48 @@ env: # Global defaults PACKAGE_MANAGER_INSTALL: "apt-get update && apt-get install -y" MAKEJOBS: "-j10" TEST_RUNNER_PORT_MIN: "14000" # Must be larger than 12321, which is used for the http cache. See https://cirrus-ci.org/guide/writing-tasks/#http-cache - CI_FAILFAST_TEST_LEAVE_DANGLING: "1" # Cirrus CI does not care about dangling process and setting this variable avoids killing the CI script itself on error - CCACHE_SIZE: "200M" + CI_FAILFAST_TEST_LEAVE_DANGLING: "1" # Cirrus CI does not care about dangling processes and setting this variable avoids killing the CI script itself on error + CCACHE_MAXSIZE: "200M" CCACHE_DIR: "/tmp/ccache_dir" CCACHE_NOHASHDIR: "1" # Debug info might contain a stale path if the build dir changes, but this is fine -cirrus_ephemeral_worker_template_env: &CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV - DANGER_RUN_CI_ON_HOST: "1" # Containers will be discarded after the run, so there is no risk that the ci scripts modify the system - -persistent_worker_template_env: &PERSISTENT_WORKER_TEMPLATE_ENV - RESTART_CI_DOCKER_BEFORE_RUN: "1" - -persistent_worker_template: &PERSISTENT_WORKER_TEMPLATE - persistent_worker: {} # https://cirrus-ci.org/guide/persistent-workers/ +# https://cirrus-ci.org/guide/persistent-workers/ +# +# It is possible to select a specific persistent worker by label. Refer to the +# Cirrus CI docs for more details. +# +# Generally, a persistent worker must run Ubuntu 23.04+ or Debian 12+. +# Specifically, +# - apt-get is required due to PACKAGE_MANAGER_INSTALL +# - podman-docker-4.1+ is required due to the use of `podman` when +# RESTART_CI_DOCKER_BEFORE_RUN is set, and version 4.1+ specifically due to the bugfix in 4.1 +# (https://github.com/bitcoin/bitcoin/pull/21652#issuecomment-1657098200) +# - The ./ci/ dependencies (with cirrus-cli) should be installed: +# +# ``` +# apt update && apt install screen python3 bash podman-docker curl -y && curl -L -o cirrus "https://github.com/cirruslabs/cirrus-cli/releases/latest/download/cirrus-linux-$(dpkg --print-architecture)" && mv cirrus /usr/local/bin/cirrus && chmod +x /usr/local/bin/cirrus +# ``` +# +# - There are no strict requirements on the hardware: a machine with fewer CPUs +# simply runs the same CI script more slowly. To avoid rare and intermittent OOM +# caused by short memory usage spikes, it is recommended to add (and persist) +# swap: +# +# ``` +# fallocate -l 16G /swapfile_ci && chmod 600 /swapfile_ci && mkswap /swapfile_ci && swapon /swapfile_ci && ( echo '/swapfile_ci none swap sw 0 0' | tee -a /etc/fstab ) +# ``` +# +# - To register the persistent worker, open a `screen` session and run: +# +# ``` +# RESTART_CI_DOCKER_BEFORE_RUN=1 screen cirrus worker run --labels type=todo_fill_in_type --token todo_fill_in_token +# ``` +# +# The following worker types should exist, with these requirements: +# - small: For an x86_64 machine, recommended to have 2 CPUs and 8 GB of memory. +# - medium: For an x86_64 machine, recommended to have 4 CPUs and 16 GB of memory. +# - noble: For a machine running exactly the Linux kernel shipped with Ubuntu Noble 24.04. The machine is recommended to have 4 CPUs and 16 GB of memory. +# - arm64: For an aarch64 machine, recommended to have 2 CPUs and 8 GB of memory.
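As a minimal sketch of how the worker labels above are consumed, a task pins itself to a worker type via `persistent_worker.labels`, mirroring the task definitions added later in this same diff. The task name `example` and the particular `FILE_ENV` shown here are illustrative only and not part of the patch:

```yaml
# Illustrative task: runs only on persistent workers that were registered
# with `cirrus worker run --labels type=medium` (see the registration
# command in the comment block above).
task:
  name: 'example'
  << : *GLOBAL_TASK_TEMPLATE       # shared base + main templates defined later in .cirrus.yml
  persistent_worker:
    labels:
      type: medium                 # must match a `type=...` label passed to `cirrus worker run`
  env:
    FILE_ENV: "./ci/test/00_setup_env_native_tidy.sh"   # selects which CI configuration to run
```

Tasks that need specific host features pin a more specific label, e.g. `noble` for the USDT functional tests or `arm64` to run the ARM job natively, as seen in the task list below.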
base_template: &BASE_TEMPLATE merge_base_script: @@ -28,26 +57,11 @@ base_template: &BASE_TEMPLATE main_template: &MAIN_TEMPLATE timeout_in: 120m # https://cirrus-ci.org/faq/#instance-timed-out - ccache_cache: - folder: "/tmp/ccache_dir" ci_script: - ./ci/test_run_all.sh -container_depends_template: &CONTAINER_DEPENDS_TEMPLATE - << : *BASE_TEMPLATE - container: - # https://cirrus-ci.org/faq/#are-there-any-limits - # Each project has 16 CPU in total, assign 2 to each container, so that 8 tasks run in parallel - cpu: 2 - greedy: true - memory: 8G # Set to 8GB to avoid OOM. https://cirrus-ci.org/guide/linux/#linux-containers - dockerfile: ci/test_imagefile # https://cirrus-ci.org/guide/docker-builder-vm/#dockerfile-as-a-ci-environment - depends_built_cache: - folder: "depends/built" - fingerprint_script: echo $CIRRUS_TASK_NAME $(git rev-parse HEAD:depends) - global_task_template: &GLOBAL_TASK_TEMPLATE - << : *CONTAINER_DEPENDS_TEMPLATE + << : *BASE_TEMPLATE << : *MAIN_TEMPLATE compute_credits_template: &CREDITS_TEMPLATE @@ -56,7 +70,7 @@ compute_credits_template: &CREDITS_TEMPLATE use_compute_credits: $CIRRUS_REPO_FULL_NAME == 'navcoin/navcoin' && $CIRRUS_PR != "" task: - name: 'lint [bookworm]' + name: 'lint' << : *BASE_TEMPLATE container: image: debian:bookworm @@ -64,269 +78,135 @@ task: memory: 1G # For faster CI feedback, immediately schedule the linters << : *CREDITS_TEMPLATE + test_runner_cache: + folder: "/lint_test_runner" + fingerprint_script: echo $CIRRUS_TASK_NAME $(git rev-parse HEAD:test/lint/test_runner) python_cache: - folder: "/tmp/python" + folder: "/python_build" fingerprint_script: cat .python-version /etc/os-release unshallow_script: - git fetch --unshallow --no-tags lint_script: - ./ci/lint_run_all.sh - env: - << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV task: - name: 'tidy [lunar]' + name: 'tidy' << : *GLOBAL_TASK_TEMPLATE - container: - cpu: 2 - memory: 5G - docker_arguments: - CI_IMAGE_NAME_TAG: ubuntu:23.04 - FILE_ENV: "./ci/test/00_setup_env_native_tidy.sh" - # For faster CI feedback, immediately schedule the linters - << : *CREDITS_TEMPLATE + persistent_worker: + labels: + type: medium env: - << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_native_tidy.sh" task: - name: "Win64 native [vs2022]" - windows_container: - cpu: 8 - memory: 16G - image: cirrusci/windowsservercore:visualstudio2022 - timeout_in: 120m - env: - PATH: 'C:\jom;C:\Python39;C:\Python39\Scripts;C:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools\MSBuild\Current\Bin;%PATH%' - PYTHONUTF8: 1 - CI_VCPKG_TAG: '2023.01.09' - VCPKG_DOWNLOADS: 'C:\Users\ContainerAdministrator\AppData\Local\vcpkg\downloads' - VCPKG_DEFAULT_BINARY_CACHE: 'C:\Users\ContainerAdministrator\AppData\Local\vcpkg\archives' - CCACHE_DIR: 'C:\Users\ContainerAdministrator\AppData\Local\ccache' - WRAPPED_CL: 'C:\Users\ContainerAdministrator\AppData\Local\Temp\cirrus-ci-build\ci\test\wrapped-cl.bat' - x64_NATIVE_TOOLS: '"C:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools\VC\Auxiliary\Build\vcvars64.bat"' - IgnoreWarnIntDirInTempDetected: 'true' - merge_script: - - PowerShell -NoLogo -Command if ($env:CIRRUS_PR -ne $null) { git fetch $env:CIRRUS_REPO_CLONE_URL pull/$env:CIRRUS_PR/merge; git reset --hard FETCH_HEAD; } - vcpkg_tools_cache: - folder: '%VCPKG_DOWNLOADS%\tools' - reupload_on_changes: false - fingerprint_script: - - echo %CI_VCPKG_TAG% - - msbuild -version - vcpkg_binary_cache: - folder: '%VCPKG_DEFAULT_BINARY_CACHE%' - reupload_on_changes: true - fingerprint_script: - - echo 
%CI_VCPKG_TAG% - - type build_msvc\vcpkg.json - - msbuild -version - populate_script: - - mkdir %VCPKG_DEFAULT_BINARY_CACHE% - ccache_cache: - folder: '%CCACHE_DIR%' - install_tools_script: - - choco install --yes --no-progress ccache --version=4.7.4 - - choco install --yes --no-progress python3 --version=3.9.6 - - pip install zmq - - ccache --version - - python -VV - install_vcpkg_script: - - cd .. - - git clone --quiet https://github.com/microsoft/vcpkg.git - - cd vcpkg - - git -c advice.detachedHead=false checkout %CI_VCPKG_TAG% - - .\bootstrap-vcpkg -disableMetrics - - echo set(VCPKG_BUILD_TYPE release) >> triplets\x64-windows-static.cmake - - .\vcpkg integrate install - - .\vcpkg version - build_script: - - '%x64_NATIVE_TOOLS%' - - cd %CIRRUS_WORKING_DIR% - - ccache --zero-stats --max-size=%CCACHE_SIZE% - - python build_msvc\msvc-autogen.py - - msbuild build_msvc\bitcoin.sln -property:CLToolExe=%WRAPPED_CL%;UseMultiToolTask=true;Configuration=Release -verbosity:minimal -noLogo - - ccache --show-stats - check_script: - - build_msvc\x64\Release\test_navcoin.exe -l test_suite - - build_msvc\x64\Release\bench_bench_navcoin.exe --sanity-check - - python test\util\test_runner.py - - python test\util\rpcauth-test.py - functional_tests_script: - # Increase the dynamic port range to the maximum allowed value to mitigate "OSError: [WinError 10048] Only one usage of each socket address (protocol/network address/port) is normally permitted". - # See: https://learn.microsoft.com/en-us/biztalk/technical-guides/settings-that-can-be-modified-to-improve-network-performance - - netsh int ipv4 set dynamicport tcp start=1025 num=64511 - - netsh int ipv6 set dynamicport tcp start=1025 num=64511 - # Exclude feature_dbcrash for now due to timeout - - python test\functional\test_runner.py --nocleanup --ci --quiet --combinedlogslen=99999999 --jobs=6 --timeout-factor=8 --extended --exclude feature_dbcrash - -task: - name: 'ARM [unit tests, no functional tests] [bookworm]' + name: 'ARM, unit tests, no functional tests' << : *GLOBAL_TASK_TEMPLATE - container: - docker_arguments: - CI_IMAGE_NAME_TAG: debian:bookworm - FILE_ENV: "./ci/test/00_setup_env_arm.sh" + persistent_worker: + labels: + type: arm64 # Use arm64 worker to sidestep qemu and avoid a slow CI: https://github.com/bitcoin/bitcoin/pull/28087#issuecomment-1649399453 env: - << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_arm.sh" task: - name: 'Win64 [unit tests, no boost::process, no functional tests] [jammy]' + name: 'Win64, unit tests, no functional tests' << : *GLOBAL_TASK_TEMPLATE - container: - docker_arguments: - CI_IMAGE_NAME_TAG: ubuntu:jammy - FILE_ENV: "./ci/test/00_setup_env_win64.sh" - << : *CREDITS_TEMPLATE + persistent_worker: + labels: + type: small env: - << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_win64.sh" task: - name: '32-bit + dash [CentOS 8]' + name: '32-bit CentOS, dash' << : *GLOBAL_TASK_TEMPLATE - container: - docker_arguments: - CI_IMAGE_NAME_TAG: quay.io/centos/centos:stream8 - FILE_ENV: "./ci/test/00_setup_env_i686_centos.sh" - # For faster CI feedback, immediately schedule one task that runs all tests - << : *CREDITS_TEMPLATE + persistent_worker: + labels: + type: small env: - << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV - PACKAGE_MANAGER_INSTALL: "yum install -y" + FILE_ENV: "./ci/test/00_setup_env_i686_centos.sh" task: - name: '[previous releases, dev package and depends packages, DEBUG] [focal]' - previous_releases_cache: - folder: "releases" + name: 
'previous releases, depends DEBUG' << : *GLOBAL_TASK_TEMPLATE - << : *PERSISTENT_WORKER_TEMPLATE + persistent_worker: + labels: + type: small env: - << : *PERSISTENT_WORKER_TEMPLATE_ENV - FILE_ENV: "./ci/test/00_setup_env_native.sh" + FILE_ENV: "./ci/test/00_setup_env_native_previous_releases.sh" task: - name: '[TSan, depends] [lunar]' + name: 'TSan, depends' << : *GLOBAL_TASK_TEMPLATE - container: - cpu: 6 # Increase CPU and Memory to avoid timeout - memory: 24G - docker_arguments: - CI_IMAGE_NAME_TAG: ubuntu:23.04 - FILE_ENV: "./ci/test/00_setup_env_native_tsan.sh" + persistent_worker: + labels: + type: medium env: - << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_native_tsan.sh" task: - name: '[MSan, depends] [lunar]' + name: 'MSan, depends' << : *GLOBAL_TASK_TEMPLATE - container: - docker_arguments: - CI_IMAGE_NAME_TAG: ubuntu:lunar - FILE_ENV: "./ci/test/00_setup_env_native_msan.sh" + persistent_worker: + labels: + type: small + timeout_in: 300m # Use longer timeout for the *rare* case where a full build (llvm + msan + depends + ...) needs to be done. env: - << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV - MAKEJOBS: "-j4" # Avoid excessive memory use due to MSan + FILE_ENV: "./ci/test/00_setup_env_native_msan.sh" task: - name: '[ASan + LSan + UBSan + integer, no depends, USDT] [lunar]' + name: 'ASan + LSan + UBSan + integer, no depends, USDT' + enable_bpfcc_script: + # In the image build step, no external environment variables are available, + # so any settings will need to be written to the settings env file: + - sed -i "s|\${CIRRUS_CI}|true|g" ./ci/test/00_setup_env_native_asan.sh << : *GLOBAL_TASK_TEMPLATE - # We can't use a 'container' for the USDT interface tests as the CirrusCI - # containers don't have privileges to hook into bitcoind. 
CirrusCI uses - # Google Compute Engine instances: https://cirrus-ci.org/guide/custom-vms/ - # Images can be found here: https://cloud.google.com/compute/docs/images/os-details - compute_engine_instance: - image_project: ubuntu-os-cloud - image: family/ubuntu-2304-amd64 # https://cirrus-ci.org/guide/custom-vms/#custom-compute-engine-vms - cpu: 4 - disk: 100 - memory: 12G + persistent_worker: + labels: + type: noble # Must use this specific worker (needed for USDT functional tests) env: - << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV - HOME: /root/ # Only needed for compute_engine_instance FILE_ENV: "./ci/test/00_setup_env_native_asan.sh" - MAKEJOBS: "-j4" # Avoid excessive memory use task: - name: '[fuzzer,address,undefined,integer, no depends] [lunar]' + name: 'fuzzer,address,undefined,integer, no depends' << : *GLOBAL_TASK_TEMPLATE - container: - cpu: 4 # Increase CPU and memory to avoid timeout - memory: 16G - docker_arguments: - CI_IMAGE_NAME_TAG: ubuntu:lunar - FILE_ENV: "./ci/test/00_setup_env_native_fuzz.sh" + persistent_worker: + labels: + type: medium env: - << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_native_fuzz.sh" task: - name: '[multiprocess, i686, DEBUG] [focal]' + name: 'multiprocess, i686, DEBUG' << : *GLOBAL_TASK_TEMPLATE - container: - cpu: 4 - memory: 16G # The default memory is sometimes just a bit too small, so double everything - docker_arguments: - CI_IMAGE_NAME_TAG: ubuntu:focal - FILE_ENV: "./ci/test/00_setup_env_i686_multiprocess.sh" + persistent_worker: + labels: + type: medium env: - << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_i686_multiprocess.sh" task: - name: '[no wallet, libbitcoinkernel] [focal]' + name: 'no wallet, libblsct' << : *GLOBAL_TASK_TEMPLATE - container: - docker_arguments: - CI_IMAGE_NAME_TAG: ubuntu:focal - FILE_ENV: "./ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh" - << : *CREDITS_TEMPLATE - env: - << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV - -task: - name: 'macOS 10.15 [no tests] [focal]' - << : *CONTAINER_DEPENDS_TEMPLATE - container: - docker_arguments: - CI_IMAGE_NAME_TAG: ubuntu:focal - FILE_ENV: "./ci/test/00_setup_env_mac.sh" - << : *CREDITS_TEMPLATE - macos_sdk_cache: - folder: "depends/SDKs/$MACOS_SDK" - fingerprint_key: "$MACOS_SDK" - << : *MAIN_TEMPLATE + persistent_worker: + labels: + type: small env: - MACOS_SDK: "Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers" - << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_native_libblsct_only.sh" task: - name: 'macOS 13 native arm64 [sqlite only] [no depends]' - macos_instance: - # Use latest image, but hardcode version to avoid silent upgrades (and breaks) - image: ghcr.io/cirruslabs/macos-ventura-xcode:14.1 # https://cirrus-ci.org/guide/macOS - << : *BASE_TEMPLATE - check_clang_script: - - clang --version - brew_install_script: - - brew install boost libevent qt@5 miniupnpc libnatpmp ccache zeromq qrencode libtool automake gnu-getopt - << : *MAIN_TEMPLATE + name: 'no wallet, libbitcoinkernel' + << : *GLOBAL_TASK_TEMPLATE + persistent_worker: + labels: + type: small env: - << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV - CI_USE_APT_INSTALL: "no" - PACKAGE_MANAGER_INSTALL: "echo" # Nothing to do - FILE_ENV: "./ci/test/00_setup_env_mac_native_arm64.sh" + FILE_ENV: "./ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh" task: - name: 'ARM64 Android APK [jammy]' - << : *CONTAINER_DEPENDS_TEMPLATE - container: - docker_arguments: - CI_IMAGE_NAME_TAG: ubuntu:jammy - FILE_ENV: 
"./ci/test/00_setup_env_android.sh" - << : *CREDITS_TEMPLATE - android_sdk_cache: - folder: "depends/SDKs/android" - fingerprint_key: "ANDROID_API_LEVEL=28 ANDROID_BUILD_TOOLS_VERSION=28.0.3 ANDROID_NDK_VERSION=23.2.8568313" - depends_sources_cache: - folder: "depends/sources" - fingerprint_script: git rev-parse HEAD:depends/packages - << : *MAIN_TEMPLATE + name: 'macOS-cross, no tests' + << : *GLOBAL_TASK_TEMPLATE + persistent_worker: + labels: + type: small env: - << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV + FILE_ENV: "./ci/test/00_setup_env_mac_cross.sh" diff --git a/.github/ISSUE_TEMPLATE/good_first_issue.yml b/.github/ISSUE_TEMPLATE/good_first_issue.yml index c40dad9687a05..133937c011af6 100644 --- a/.github/ISSUE_TEMPLATE/good_first_issue.yml +++ b/.github/ISSUE_TEMPLATE/good_first_issue.yml @@ -29,8 +29,10 @@ body: attributes: label: Useful Skills description: For example, “`std::thread`”, “Qt5 GUI and async GUI design” or “basic understanding of Bitcoin mining and the Bitcoin Core RPC interface”. - validations: - required: false + value: | + * Compiling Bitcoin Core from source + * Running the C++ unit tests and the Python functional tests + * ... - type: textarea attributes: label: Guidance for new contributors diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000000000..33783a0c6fd72 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,236 @@ +# Copyright (c) 2023 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +name: CI +on: + # See: https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request. + pull_request: + # See: https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#push. + push: + branches: + - '**' + tags-ignore: + - '**' + +concurrency: + group: ${{ github.event_name != 'pull_request' && github.run_id || github.ref }} + cancel-in-progress: true + +env: + DANGER_RUN_CI_ON_HOST: 1 + CI_FAILFAST_TEST_LEAVE_DANGLING: 1 # GHA does not care about dangling processes and setting this variable avoids killing the CI script itself on error + MAKEJOBS: '-j10' + +jobs: + test-each-commit: + name: 'test each commit' + runs-on: ubuntu-22.04 + if: github.event_name == 'pull_request' && github.event.pull_request.commits != 1 + timeout-minutes: 360 # Use maximum time, see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idtimeout-minutes. Assuming a worst case time of 1 hour per commit, this leads to a --max-count=6 below. + env: + MAX_COUNT: 6 + steps: + - name: Determine fetch depth + run: echo "FETCH_DEPTH=$((${{ github.event.pull_request.commits }} + 2))" >> "$GITHUB_ENV" + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: ${{ env.FETCH_DEPTH }} + - name: Determine commit range + run: | + # Checkout HEAD~ and find the test base commit + # Checkout HEAD~ because it would be wasteful to rerun tests on the PR + # head commit that are already run by other jobs. + git checkout HEAD~ + # Figure out test base commit by listing ancestors of HEAD, excluding + # ancestors of the most recent merge commit, limiting the list to the + # newest MAX_COUNT ancestors, ordering it from oldest to newest, and + # taking the first one. + # + # If the branch contains up to MAX_COUNT ancestor commits after the + # most recent merge commit, all of those commits will be tested. 
If it + # contains more, only the most recent MAX_COUNT commits will be + # tested. + # + # In the command below, the ^@ suffix is used to refer to all parents + # of the merge commit as described in: + # https://git-scm.com/docs/git-rev-parse#_other_rev_parent_shorthand_notations + # and the ^ prefix is used to exclude these parents and all their + # ancestors from the rev-list output as described in: + # https://git-scm.com/docs/git-rev-list + echo "TEST_BASE=$(git rev-list -n$((${{ env.MAX_COUNT }} + 1)) --reverse HEAD ^$(git rev-list -n1 --merges HEAD)^@ | head -1)" >> "$GITHUB_ENV" + - run: | + sudo apt-get update + sudo apt-get install clang-15 ccache build-essential libtool autotools-dev automake pkg-config bsdmainutils python3-zmq libevent-dev libboost-dev libsqlite3-dev libdb++-dev systemtap-sdt-dev libminiupnpc-dev libnatpmp-dev -y + - name: Compile and run tests + run: | + # Run tests on commits after the last merge commit and before the PR head commit + # Use clang++, because it is a bit faster and uses less memory than g++ + git rebase --exec "echo Running test-one-commit on \$( git log -1 ) && ./autogen.sh && CC=clang-15 CXX=clang++-15 ./configure && make clean && make -j $(nproc) check && ./test/functional/test_runner.py -j $(( $(nproc) * 2 ))" ${{ env.TEST_BASE }} + + macos-native-x86_64: + name: 'macOS 13 native, x86_64, no depends, sqlite only' + # Use latest image, but hardcode version to avoid silent upgrades (and breaks). + # See: https://github.com/actions/runner-images#available-images. + runs-on: macos-13 + + # No need to run on the read-only mirror, unless it is a PR. + if: github.repository != 'bitcoin-core/gui' || github.event_name == 'pull_request' + + timeout-minutes: 120 + + env: + FILE_ENV: './ci/test/00_setup_env_mac_native.sh' + BASE_ROOT_DIR: ${{ github.workspace }} + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Clang version + run: | + sudo xcode-select --switch /Applications/Xcode_15.0.app + clang --version + + - name: Install Homebrew packages + env: + HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK: 1 + run: brew install automake libtool pkg-config gnu-getopt ccache boost libevent miniupnpc libnatpmp zeromq + + - name: Set Ccache directory + run: echo "CCACHE_DIR=${RUNNER_TEMP}/ccache_dir" >> "$GITHUB_ENV" + + - name: Restore Ccache cache + id: ccache-cache + uses: actions/cache/restore@v3 + with: + path: ${{ env.CCACHE_DIR }} + key: ${{ github.job }}-ccache-${{ github.run_id }} + restore-keys: ${{ github.job }}-ccache- + + - name: CI script + run: ./ci/test_run_all.sh + + - name: Save Ccache cache + uses: actions/cache/save@v3 + if: github.event_name != 'pull_request' && steps.ccache-cache.outputs.cache-hit != 'true' + with: + path: ${{ env.CCACHE_DIR }} + # https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache + key: ${{ github.job }}-ccache-${{ github.run_id }} + + win64-native: + name: 'Win64 native, VS 2022' + # Use latest image, but hardcode version to avoid silent upgrades (and breaks). + # See: https://github.com/actions/runner-images#available-images. + runs-on: windows-2022 + + # No need to run on the read-only mirror, unless it is a PR. 
+ if: github.repository != 'bitcoin-core/gui' || github.event_name == 'pull_request' + + env: + CCACHE_MAXSIZE: '200M' + CI_CCACHE_VERSION: '4.7.5' + PYTHONUTF8: 1 + TEST_RUNNER_TIMEOUT_FACTOR: 40 + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Configure Developer Command Prompt for Microsoft Visual C++ + # Using microsoft/setup-msbuild is not enough. + uses: ilammy/msvc-dev-cmd@v1 + with: + arch: x64 + + - name: Check MSBuild + run: | + msbuild -version | Out-File -FilePath "$env:GITHUB_WORKSPACE\msbuild_version" + Get-Content -Path "$env:GITHUB_WORKSPACE\msbuild_version" + $env:VCToolsVersion | Out-File -FilePath "$env:GITHUB_WORKSPACE\toolset_version" + Get-Content -Path "$env:GITHUB_WORKSPACE\toolset_version" + + - name: Ccache installation cache + id: ccache-installation-cache + uses: actions/cache@v3 + with: + path: | + C:\ProgramData\chocolatey\lib\ccache + C:\ProgramData\chocolatey\bin\ccache.exe + C:\ccache\cl.exe + key: ${{ github.job }}-ccache-installation-${{ env.CI_CCACHE_VERSION }} + + - name: Install Ccache + if: steps.ccache-installation-cache.outputs.cache-hit != 'true' + run: | + choco install --yes --no-progress ccache --version=$env:CI_CCACHE_VERSION + New-Item -ItemType Directory -Path "C:\ccache" + Copy-Item -Path "$env:ChocolateyInstall\lib\ccache\tools\ccache-$env:CI_CCACHE_VERSION-windows-x86_64\ccache.exe" -Destination "C:\ccache\cl.exe" + + - name: Restore Ccache cache + id: ccache-cache + uses: actions/cache/restore@v3 + with: + path: ~/AppData/Local/ccache + key: ${{ github.job }}-ccache-${{ github.run_id }} + restore-keys: ${{ github.job }}-ccache- + + - name: Using vcpkg with MSBuild + run: | + Set-Location "$env:VCPKG_INSTALLATION_ROOT" + Add-Content -Path "triplets\x64-windows-static.cmake" -Value "set(VCPKG_BUILD_TYPE release)" + Add-Content -Path "triplets\x64-windows-static.cmake" -Value "set(VCPKG_PLATFORM_TOOLSET_VERSION $env:VCToolsVersion)" + .\vcpkg.exe --vcpkg-root "$env:VCPKG_INSTALLATION_ROOT" integrate install + git rev-parse HEAD | Out-File -FilePath "$env:GITHUB_WORKSPACE\vcpkg_commit" + Get-Content -Path "$env:GITHUB_WORKSPACE\vcpkg_commit" + + - name: vcpkg tools cache + uses: actions/cache@v3 + with: + path: C:/vcpkg/downloads/tools + key: ${{ github.job }}-vcpkg-tools + + - name: vcpkg binary cache + uses: actions/cache@v3 + with: + path: ~/AppData/Local/vcpkg/archives + key: ${{ github.job }}-vcpkg-binary-${{ hashFiles('vcpkg_commit', 'msbuild_version', 'toolset_version', 'build_msvc/vcpkg.json') }} + + - name: Generate project files + run: py -3 build_msvc\msvc-autogen.py + + - name: Build + shell: cmd + run: | + ccache --zero-stats + msbuild build_msvc\bitcoin.sln -property:CLToolPath=C:\ccache;CLToolExe=cl.exe;UseMultiToolTask=true;Configuration=Release -maxCpuCount -verbosity:minimal -noLogo + + - name: Ccache stats + run: ccache --show-stats + + - name: Save Ccache cache + uses: actions/cache/save@v3 + if: github.event_name != 'pull_request' && steps.ccache-cache.outputs.cache-hit != 'true' + with: + path: ~/AppData/Local/ccache + # https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache + key: ${{ github.job }}-ccache-${{ github.run_id }} + + - name: Run unit tests + run: src\test_navcoin.exe -l test_suite + + - name: Run benchmarks + run: src\bench_navcoin.exe -sanity-check + + - name: Run util tests + run: py -3 test\util\test_runner.py + + - name: Run rpcauth test + run: py -3 test\util\rpcauth-test.py + + - name: Run functional tests + env: + TEST_RUNNER_EXTRA: ${{ 
github.event_name != 'pull_request' && '--extended' || '' }} + run: py -3 test\functional\test_runner.py --jobs $env:NUMBER_OF_PROCESSORS --ci --quiet --tmpdirprefix=$env:RUNNER_TEMP --combinedlogslen=99999999 --timeout-factor=$env:TEST_RUNNER_TIMEOUT_FACTOR $env:TEST_RUNNER_EXTRA diff --git a/.gitignore b/.gitignore index 8461e24d554cd..28e98aa96849c 100644 --- a/.gitignore +++ b/.gitignore @@ -2,10 +2,10 @@ *.exe *.pdb +*.tmp src/navcoin src/navcoind src/navcoin-cli -src/navcoin-gui src/navcoin-node src/navcoin-tx src/navcoin-util @@ -13,7 +13,6 @@ src/navcoin-chainstate src/navcoin-wallet src/test/fuzz/fuzz src/test/test_navcoin -src/qt/test/test_navcoin-qt # autoreconf Makefile.in @@ -37,24 +36,10 @@ config.log config.status configure libtool -src/config/bitcoin-config.h -src/config/bitcoin-config.h.in +src/config/navcoin-config.h +src/config/navcoin-config.h.in src/config/stamp-h1 src/obj -share/setup.nsi -share/qt/Info.plist - -src/qt/*.moc -src/qt/moc_*.cpp -src/qt/forms/ui_*.h - -src/qt/test/moc*.cpp - -src/qt/bitcoin-qt.config -src/qt/bitcoin-qt.creator -src/qt/bitcoin-qt.creator.user -src/qt/bitcoin-qt.files -src/qt/bitcoin-qt.includes .deps .dirstamp @@ -92,15 +77,12 @@ src/qt/bitcoin-qt.includes *.qm Makefile !depends/Makefile -src/qt/navcoin-qt -Navcoin-Qt.app # Qt Creator Makefile.am.user # Unit-tests Makefile.test -navcoin-qt_test # Resources cpp qrc_*.cpp @@ -116,7 +98,7 @@ releases *.gcno *.gcda /*.info -test_bitcoin.coverage/ +test_navcoin.coverage/ total.coverage/ fuzz.coverage/ coverage_percent.txt @@ -130,12 +112,13 @@ win32-build test/config.ini test/cache/* test/.mypy_cache/ +test/lint/test_runner/target/ !src/leveldb*/Makefile /doc/doxygen/ -libbitcoinconsensus.pc +libnavcoinconsensus.pc contrib/devtools/split-debug.sh # Output from running db4 installation diff --git a/.style.yapf b/.style.yapf index 69d8c6aee417d..350ac638554e5 100644 --- a/.style.yapf +++ b/.style.yapf @@ -107,7 +107,7 @@ each_dict_entry_on_separate_line=True i18n_comment= # The i18n function call names. The presence of this function stops -# reformattting on that line, because the string it has cannot be moved +# reformatting on that line, because the string it has cannot be moved # away from the i18n comment. 
i18n_function_call= diff --git a/Makefile.am b/Makefile.am index 6372e8731209d..2df4cc9d1b299 100644 --- a/Makefile.am +++ b/Makefile.am @@ -16,21 +16,18 @@ endif export PYTHONPATH -if BUILD_BITCOIN_LIBS +if BUILD_NAVCOIN_LIBS pkgconfigdir = $(libdir)/pkgconfig -pkgconfig_DATA = libbitcoinconsensus.pc +pkgconfig_DATA = libnavcoinconsensus.pc endif -BITCOIND_BIN=$(top_builddir)/src/$(BITCOIN_DAEMON_NAME)$(EXEEXT) -BITCOIN_QT_BIN=$(top_builddir)/src/qt/$(BITCOIN_GUI_NAME)$(EXEEXT) -BITCOIN_TEST_BIN=$(top_builddir)/src/test/$(BITCOIN_TEST_NAME)$(EXEEXT) -BITCOIN_CLI_BIN=$(top_builddir)/src/$(BITCOIN_CLI_NAME)$(EXEEXT) -BITCOIN_TX_BIN=$(top_builddir)/src/$(BITCOIN_TX_NAME)$(EXEEXT) -BITCOIN_UTIL_BIN=$(top_builddir)/src/$(BITCOIN_UTIL_NAME)$(EXEEXT) -BITCOIN_WALLET_BIN=$(top_builddir)/src/$(BITCOIN_WALLET_TOOL_NAME)$(EXEEXT) -BITCOIN_NODE_BIN=$(top_builddir)/src/$(BITCOIN_MP_NODE_NAME)$(EXEEXT) -BITCOIN_GUI_BIN=$(top_builddir)/src/$(BITCOIN_MP_GUI_NAME)$(EXEEXT) -BITCOIN_WIN_INSTALLER=$(PACKAGE)-$(PACKAGE_VERSION)-win64-setup$(EXEEXT) +NAVCOIND_BIN=$(top_builddir)/src/$(NAVCOIN_DAEMON_NAME)$(EXEEXT) +NAVCOIN_TEST_BIN=$(top_builddir)/src/test/$(NAVCOIN_TEST_NAME)$(EXEEXT) +NAVCOIN_CLI_BIN=$(top_builddir)/src/$(NAVCOIN_CLI_NAME)$(EXEEXT) +NAVCOIN_TX_BIN=$(top_builddir)/src/$(NAVCOIN_TX_NAME)$(EXEEXT) +NAVCOIN_UTIL_BIN=$(top_builddir)/src/$(NAVCOIN_UTIL_NAME)$(EXEEXT) +NAVCOIN_WALLET_BIN=$(top_builddir)/src/$(NAVCOIN_WALLET_TOOL_NAME)$(EXEEXT) +NAVCOIN_NODE_BIN=$(top_builddir)/src/$(NAVCOIN_MP_NODE_NAME)$(EXEEXT) empty := space := $(empty) $(empty) @@ -60,22 +57,22 @@ COVERAGE_INFO = $(COV_TOOL_WRAPPER) baseline.info \ dist-hook: -$(GIT) archive --format=tar HEAD -- src/clientversion.cpp | $(AMTAR) -C $(top_distdir) -xf - -$(BITCOIND_BIN): FORCE +$(NAVCOIND_BIN): FORCE $(MAKE) -C src $(@F) -$(BITCOIN_CLI_BIN): FORCE +$(NAVCOIN_CLI_BIN): FORCE $(MAKE) -C src $(@F) -$(BITCOIN_TX_BIN): FORCE +$(NAVCOIN_TX_BIN): FORCE $(MAKE) -C src $(@F) -$(BITCOIN_UTIL_BIN): FORCE +$(NAVCOIN_UTIL_BIN): FORCE $(MAKE) -C src $(@F) -$(BITCOIN_WALLET_BIN): FORCE +$(NAVCOIN_WALLET_BIN): FORCE $(MAKE) -C src $(@F) -$(BITCOIN_NODE_BIN): FORCE +$(NAVCOIN_NODE_BIN): FORCE $(MAKE) -C src $(@F) if USE_LCOV @@ -224,8 +221,6 @@ EXTRA_DIST += \ test/util/data/txcreatesignv2.hex \ test/util/rpcauth-test.py -CLEANFILES = $(OSX_DMG) $(BITCOIN_WIN_INSTALLER) - DISTCHECK_CONFIGURE_FLAGS = --enable-man doc/doxygen/.stamp: doc/Doxyfile FORCE diff --git a/build_msvc/msvc-autogen.py b/build_msvc/msvc-autogen.py index 33f82f13542be..35ee755531029 100755 --- a/build_msvc/msvc-autogen.py +++ b/build_msvc/msvc-autogen.py @@ -72,7 +72,7 @@ def find_between( s, first, last ): version = config_dict["PACKAGE_VERSION"].strip('"') config_dict["PACKAGE_STRING"] = f"\"Navcoin Core {version}\"" - with open(os.path.join(SOURCE_DIR,'../build_msvc/bitcoin_config.h.in'), "r", encoding="utf8") as template_file: + with open(os.path.join(SOURCE_DIR,'../build_msvc/navcoin_config.h.in'), "r", encoding="utf8") as template_file: template = template_file.readlines() for index, line in enumerate(template): @@ -82,7 +82,7 @@ def find_between( s, first, last ): if header in config_dict: template[index] = line.replace("$", f"{config_dict[header]}") - with open(os.path.join(SOURCE_DIR,'../build_msvc/bitcoin_config.h'), "w", encoding="utf8") as btc_config: + with open(os.path.join(SOURCE_DIR,'../build_msvc/navcoin_config.h'), "w", encoding="utf8") as btc_config: btc_config.writelines(template) def set_properties(vcxproj_filename, placeholder, content): @@ -110,7 +110,7 
@@ def main(): content += ' \n' set_properties(vcxproj_filename, '@SOURCE_FILES@\n', content) parse_config_into_btc_config() - copyfile(os.path.join(SOURCE_DIR,'../build_msvc/bitcoin_config.h'), os.path.join(SOURCE_DIR, 'config/bitcoin-config.h')) + copyfile(os.path.join(SOURCE_DIR,'../build_msvc/navcoin_config.h'), os.path.join(SOURCE_DIR, 'config/navcoin-config.h')) if __name__ == '__main__': main() diff --git a/build_msvc/bitcoin_config.h.in b/build_msvc/navcoin_config.h.in similarity index 100% rename from build_msvc/bitcoin_config.h.in rename to build_msvc/navcoin_config.h.in diff --git a/ci/README.md b/ci/README.md index d014565f4473c..7cf3b1f563b36 100644 --- a/ci/README.md +++ b/ci/README.md @@ -20,16 +20,11 @@ requires `bash`, `docker`, and `python3` to be installed. To install all require sudo apt install bash docker.io python3 ``` -To run the default test stage, +It is recommended to run the ci system in a clean env. To run the test stage +with a specific configuration, ``` -./ci/test_run_all.sh -``` - -To run the test stage with a specific configuration, - -``` -FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh +env -i HOME="$HOME" PATH="$PATH" USER="$USER" bash -c 'FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh' ``` ### Configurations @@ -44,14 +39,11 @@ the system package manager to install build dependencies. This guarantees that the tester is using the same versions as the release builds, which also use `./depends`. -If no `FILE_ENV` has been specified or values are left out, `00_setup_env.sh` -is used as the default configuration with fallback values. - It is also possible to force a specific configuration without modifying the file. For example, ``` -MAKEJOBS="-j1" FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh +env -i HOME="$HOME" PATH="$PATH" USER="$USER" bash -c 'MAKEJOBS="-j1" FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh' ``` The files starting with `0n` (`n` greater than 0) are the scripts that are run @@ -60,5 +52,5 @@ in order. ### Cache In order to avoid rebuilding all dependencies for each build, the binaries are -cached and re-used when possible. Changes in the dependency-generator will +cached and reused when possible. Changes in the dependency-generator will trigger cache-invalidation and rebuilds as necessary. diff --git a/ci/lint/04_install.sh b/ci/lint/04_install.sh index f7147582dc1da..476417d04b9ee 100755 --- a/ci/lint/04_install.sh +++ b/ci/lint/04_install.sh @@ -6,6 +6,8 @@ export LC_ALL=C +export PATH=$PWD/ci/retry:$PATH + ${CI_RETRY_EXE} apt-get update # Lint dependencies: # - curl/xz-utils (to install shellcheck) @@ -13,31 +15,42 @@ ${CI_RETRY_EXE} apt-get update # - gpg (used by verify-commits) ${CI_RETRY_EXE} apt-get install -y curl xz-utils git gpg -if [ -z "${SKIP_PYTHON_INSTALL}" ]; then - PYTHON_PATH=/tmp/python - if [ ! -d "${PYTHON_PATH}/bin" ]; then - ( - git clone https://github.com/pyenv/pyenv.git - cd pyenv/plugins/python-build || exit 1 - ./install.sh - ) - # For dependencies see https://github.com/pyenv/pyenv/wiki#suggested-build-environment - ${CI_RETRY_EXE} apt-get install -y build-essential libssl-dev zlib1g-dev \ - libbz2-dev libreadline-dev libsqlite3-dev curl llvm \ - libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev \ - clang - env CC=clang python-build "$(cat "${BASE_ROOT_DIR}/.python-version")" "${PYTHON_PATH}" - fi - export PATH="${PYTHON_PATH}/bin:${PATH}" - command -v python3 - python3 --version +PYTHON_PATH="/python_build" +if [ ! 
-d "${PYTHON_PATH}/bin" ]; then + ( + ${CI_RETRY_EXE} git clone https://github.com/pyenv/pyenv.git + cd pyenv/plugins/python-build || exit 1 + ./install.sh + ) + # For dependencies see https://github.com/pyenv/pyenv/wiki#suggested-build-environment + ${CI_RETRY_EXE} apt-get install -y build-essential libssl-dev zlib1g-dev \ + libbz2-dev libreadline-dev libsqlite3-dev curl llvm \ + libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev \ + clang + env CC=clang python-build "$(cat "./.python-version")" "${PYTHON_PATH}" +fi +export PATH="${PYTHON_PATH}/bin:${PATH}" +command -v python3 +python3 --version + +export LINT_RUNNER_PATH="/lint_test_runner" +if [ ! -d "${LINT_RUNNER_PATH}" ]; then + ${CI_RETRY_EXE} apt-get install -y cargo + ( + cd ./test/lint/test_runner || exit 1 + cargo build + mkdir -p "${LINT_RUNNER_PATH}" + mv target/debug/test_runner "${LINT_RUNNER_PATH}" + ) fi -${CI_RETRY_EXE} pip3 install codespell==2.2.1 -${CI_RETRY_EXE} pip3 install flake8==5.0.4 -${CI_RETRY_EXE} pip3 install mypy==0.971 -${CI_RETRY_EXE} pip3 install pyzmq==24.0.1 -${CI_RETRY_EXE} pip3 install vulture==2.6 +${CI_RETRY_EXE} pip3 install \ + codespell==2.2.5 \ + flake8==6.1.0 \ + lief==0.13.2 \ + mypy==1.4.1 \ + pyzmq==25.1.0 \ + vulture==2.6 SHELLCHECK_VERSION=v0.8.0 curl -sL "https://github.com/koalaman/shellcheck/releases/download/${SHELLCHECK_VERSION}/shellcheck-${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" | \ diff --git a/ci/lint/06_script.sh b/ci/lint/06_script.sh index 5d62451553c84..83732e4f35b41 100755 --- a/ci/lint/06_script.sh +++ b/ci/lint/06_script.sh @@ -6,6 +6,8 @@ export LC_ALL=C +set -ex + if [ -n "$LOCAL_BRANCH" ]; then # To faithfully recreate CI linting locally, specify all commits on the current # branch. @@ -21,15 +23,7 @@ else fi export COMMIT_RANGE -# This only checks that the trees are pure subtrees, it is not doing a full -# check with -r to not have to fetch all the remotes. -test/lint/git-subtree-check.sh src/crypto/ctaes -test/lint/git-subtree-check.sh src/secp256k1 -test/lint/git-subtree-check.sh src/minisketch -test/lint/git-subtree-check.sh src/leveldb -test/lint/git-subtree-check.sh src/crc32c -test/lint/check-doc.py -test/lint/all-lint.py +RUST_BACKTRACE=1 "${LINT_RUNNER_PATH}/test_runner" if [ "$CIRRUS_REPO_FULL_NAME" = "navcoin/navcoin" ] && [ "$CIRRUS_PR" = "" ] ; then # Sanity check only the last few commits to get notified of missing sigs, diff --git a/ci/lint/Dockerfile b/ci/lint/Dockerfile deleted file mode 100644 index a0a45164803ff..0000000000000 --- a/ci/lint/Dockerfile +++ /dev/null @@ -1,29 +0,0 @@ -# See test/lint/README.md for usage. -# -# This container basically has to live in this directory in order to pull in the CI -# install scripts. If it lived in the root directory, it would have to pull in the -# entire repo as docker context during build; if it lived elsewhere, it wouldn't be -# able to make back-references to pull in the install scripts. So here it lives. - -FROM python:3.8-buster - -ENV DEBIAN_FRONTEND=noninteractive -ENV LC_ALL=C.UTF-8 - -# This is used by the 04_install.sh script; we can't read the Python version from -# .python-version for the same reasons as above, and it's more efficient to pull a -# preexisting Python image than it is to build from source. -ENV SKIP_PYTHON_INSTALL=1 - -# Must be built from ./ci/lint/ for these paths to work. 
-COPY ./docker-entrypoint.sh /entrypoint.sh -COPY ./04_install.sh /install.sh - -RUN /install.sh && \ - echo 'alias lint="./ci/lint/06_script.sh"' >> ~/.bashrc && \ - chmod 755 /entrypoint.sh && \ - rm -rf /var/lib/apt/lists/* - - -WORKDIR /bitcoin -ENTRYPOINT ["/entrypoint.sh"] diff --git a/ci/lint/docker-entrypoint.sh b/ci/lint/container-entrypoint.sh similarity index 53% rename from ci/lint/docker-entrypoint.sh rename to ci/lint/container-entrypoint.sh index 3fdbbb0761c0a..a403f923a21a5 100755 --- a/ci/lint/docker-entrypoint.sh +++ b/ci/lint/container-entrypoint.sh @@ -1,10 +1,18 @@ #!/usr/bin/env bash +# +# Copyright (c) The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + export LC_ALL=C # Fixes permission issues when there is a container UID/GID mismatch with the owner # of the mounted bitcoin src dir. git config --global --add safe.directory /bitcoin +export PATH="/python_build/bin:${PATH}" +export LINT_RUNNER_PATH="/lint_test_runner" + if [ -z "$1" ]; then LOCAL_BRANCH=1 bash -ic "./ci/lint/06_script.sh" else diff --git a/ci/lint_imagefile b/ci/lint_imagefile new file mode 100644 index 0000000000000..d32b35b19d0a7 --- /dev/null +++ b/ci/lint_imagefile @@ -0,0 +1,24 @@ +# Copyright (c) The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +# See test/lint/README.md for usage. + +FROM debian:bookworm + +ENV DEBIAN_FRONTEND=noninteractive +ENV LC_ALL=C.UTF-8 + +COPY ./.python-version /.python-version +COPY ./ci/lint/container-entrypoint.sh /entrypoint.sh +COPY ./ci/lint/04_install.sh /install.sh +COPY ./test/lint/test_runner /test/lint/test_runner + +RUN /install.sh && \ + echo 'alias lint="./ci/lint/06_script.sh"' >> ~/.bashrc && \ + chmod 755 /entrypoint.sh && \ + rm -rf /var/lib/apt/lists/* + + +WORKDIR /bitcoin +ENTRYPOINT ["/entrypoint.sh"] diff --git a/ci/lint_run_all.sh b/ci/lint_run_all.sh index 7adfe71674149..b56ee0d303f9b 100755 --- a/ci/lint_run_all.sh +++ b/ci/lint_run_all.sh @@ -8,4 +8,5 @@ export LC_ALL=C.UTF-8 set -o errexit; source ./ci/test/00_setup_env.sh set -o errexit; source ./ci/lint/04_install.sh -set -o errexit; source ./ci/lint/06_script.sh +set -o errexit +./ci/lint/06_script.sh diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh index 4a54f47b03f5a..b9e6818afbccd 100755 --- a/ci/test/00_setup_env.sh +++ b/ci/test/00_setup_env.sh @@ -6,16 +6,25 @@ export LC_ALL=C.UTF-8 -# The root dir. +set -ex + +# The source root dir, usually from git, usually read-only. # The ci system copies this folder. -BASE_ROOT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../../ >/dev/null 2>&1 && pwd ) -export BASE_ROOT_DIR +BASE_READ_ONLY_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../../ >/dev/null 2>&1 && pwd ) +export BASE_READ_ONLY_DIR +# The destination root dir inside the container. +# This folder will also hold any SDKs. +# This folder only exists on the ci guest and will be a copy of BASE_READ_ONLY_DIR +export BASE_ROOT_DIR="${BASE_ROOT_DIR:-/ci_container_base}" # The depends dir. # This folder exists only on the ci guest, and on the ci host as a volume. export DEPENDS_DIR=${DEPENDS_DIR:-$BASE_ROOT_DIR/depends} -# A folder for the ci system to put temporary files (ccache, datadirs for tests, ...) -# This folder only exists on the ci host. +# A folder for the ci system to put temporary files (build result, datadirs for tests, ...) 
+# This folder only exists on the ci guest. export BASE_SCRATCH_DIR=${BASE_SCRATCH_DIR:-$BASE_ROOT_DIR/ci/scratch} +# A folder for the ci system to put executables. +# This folder only exists on the ci guest. +export BINS_SCRATCH_DIR="${BASE_SCRATCH_DIR}/bins/" echo "Setting specific values in env" if [ -n "${FILE_ENV}" ]; then @@ -27,48 +36,37 @@ fi echo "Fallback to default values in env (if not yet set)" # The number of parallel jobs to pass down to make and test_runner.py export MAKEJOBS=${MAKEJOBS:--j4} -# What host to compile for. See also ./depends/README.md -# Tests that need cross-compilation export the appropriate HOST. -# Tests that run natively guess the host -export HOST=${HOST:-$("$BASE_ROOT_DIR/depends/config.guess")} # Whether to prefer BusyBox over GNU utilities export USE_BUSY_BOX=${USE_BUSY_BOX:-false} export RUN_UNIT_TESTS=${RUN_UNIT_TESTS:-true} export RUN_FUNCTIONAL_TESTS=${RUN_FUNCTIONAL_TESTS:-true} export RUN_TIDY=${RUN_TIDY:-false} -export RUN_SECURITY_TESTS=${RUN_SECURITY_TESTS:-false} # By how much to scale the test_runner timeouts (option --timeout-factor). # This is needed because some ci machines have slow CPU or disk, so sanitizers # might be slow or a reindex might be waiting on disk IO. export TEST_RUNNER_TIMEOUT_FACTOR=${TEST_RUNNER_TIMEOUT_FACTOR:-40} -export TEST_RUNNER_ENV=${TEST_RUNNER_ENV:-} export RUN_FUZZ_TESTS=${RUN_FUZZ_TESTS:-false} -export CONTAINER_NAME=${CONTAINER_NAME:-ci_unnamed} -export CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG:-ubuntu:20.04} # Randomize test order. # See https://www.boost.org/doc/libs/1_71_0/libs/test/doc/html/boost_test/utf_reference/rt_param_reference/random.html export BOOST_TEST_RANDOM=${BOOST_TEST_RANDOM:-1} # See man 7 debconf export DEBIAN_FRONTEND=noninteractive -export CCACHE_SIZE=${CCACHE_SIZE:-100M} +export CCACHE_MAXSIZE=${CCACHE_MAXSIZE:-100M} export CCACHE_TEMPDIR=${CCACHE_TEMPDIR:-/tmp/.ccache-temp} export CCACHE_COMPRESS=${CCACHE_COMPRESS:-1} # The cache dir. # This folder exists only on the ci guest, and on the ci host as a volume. export CCACHE_DIR=${CCACHE_DIR:-$BASE_SCRATCH_DIR/.ccache} # Folder where the build result is put (bin and lib). -export BASE_OUTDIR=${BASE_OUTDIR:-$BASE_SCRATCH_DIR/out/$HOST} +export BASE_OUTDIR=${BASE_OUTDIR:-$BASE_SCRATCH_DIR/out} # Folder where the build is done (dist and out-of-tree build). export BASE_BUILD_DIR=${BASE_BUILD_DIR:-$BASE_SCRATCH_DIR/build} # The folder for previous release binaries. # This folder exists only on the ci guest, and on the ci host as a volume. 
-export PREVIOUS_RELEASES_DIR=${PREVIOUS_RELEASES_DIR:-$BASE_ROOT_DIR/releases/$HOST} -export DIR_IWYU="${BASE_SCRATCH_DIR}/iwyu" -export SDK_URL=${SDK_URL:-https://bitcoincore.org/depends-sources/sdks} -export CI_BASE_PACKAGES=${CI_BASE_PACKAGES:-build-essential libtool autotools-dev automake pkg-config bsdmainutils curl ca-certificates ccache python3 rsync git procps bison} +export PREVIOUS_RELEASES_DIR=${PREVIOUS_RELEASES_DIR:-$BASE_ROOT_DIR/prev_releases} +export CI_BASE_PACKAGES=${CI_BASE_PACKAGES:-build-essential libtool autotools-dev automake pkg-config bsdmainutils curl ca-certificates ccache python3 rsync git procps bison e2fsprogs cmake} export GOAL=${GOAL:-install} export DIR_QA_ASSETS=${DIR_QA_ASSETS:-${BASE_SCRATCH_DIR}/qa-assets} -export PATH=${BASE_ROOT_DIR}/ci/retry:$PATH export CI_RETRY_EXE=${CI_RETRY_EXE:-"retry --"} diff --git a/ci/test/00_setup_env_android.sh b/ci/test/00_setup_env_android.sh index 8ce8f9ca8febc..5c75c14d38033 100755 --- a/ci/test/00_setup_env_android.sh +++ b/ci/test/00_setup_env_android.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2023 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -9,7 +9,7 @@ export LC_ALL=C.UTF-8 export HOST=aarch64-linux-android export PACKAGES="unzip openjdk-8-jdk gradle" export CONTAINER_NAME=ci_android -export CI_IMAGE_NAME_TAG="ubuntu:jammy" +export CI_IMAGE_NAME_TAG="docker.io/amd64/ubuntu:22.04" export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false diff --git a/ci/test/00_setup_env_arm.sh b/ci/test/00_setup_env_arm.sh index c26518ae94c57..0de15e435788a 100755 --- a/ci/test/00_setup_env_arm.sh +++ b/ci/test/00_setup_env_arm.sh @@ -7,18 +7,10 @@ export LC_ALL=C.UTF-8 export HOST=arm-linux-gnueabihf -# The host arch is unknown, so we run the tests through qemu. -# If the host is arm and wants to run the tests natively, it can set QEMU_USER_CMD to the empty string. 
-if [ -z ${QEMU_USER_CMD+x} ]; then export QEMU_USER_CMD="${QEMU_USER_CMD:-"qemu-arm -L /usr/arm-linux-gnueabihf/"}"; fi export DPKG_ADD_ARCH="armhf" export PACKAGES="python3-zmq g++-arm-linux-gnueabihf busybox libc6:armhf libstdc++6:armhf libfontconfig1:armhf libxcb1:armhf" -if [ -n "$QEMU_USER_CMD" ]; then - # Likely cross-compiling, so install the needed gcc and qemu-user - export PACKAGES="$PACKAGES qemu-user" -fi export CONTAINER_NAME=ci_arm_linux -# Use debian to avoid 404 apt errors when cross compiling -export CI_IMAGE_NAME_TAG="debian:bookworm" +export CI_IMAGE_NAME_TAG="docker.io/arm64v8/debian:bookworm" export USE_BUSY_BOX=true export RUN_UNIT_TESTS=true export RUN_FUNCTIONAL_TESTS=false diff --git a/ci/test/00_setup_env_i686_centos.sh b/ci/test/00_setup_env_i686_centos.sh index e800fc75b004d..e6e6e5afd9966 100755 --- a/ci/test/00_setup_env_i686_centos.sh +++ b/ci/test/00_setup_env_i686_centos.sh @@ -8,12 +8,10 @@ export LC_ALL=C.UTF-8 export HOST=i686-pc-linux-gnu export CONTAINER_NAME=ci_i686_centos -export CI_IMAGE_NAME_TAG="quay.io/centos/centos:stream8" -# Use minimum supported python3.8 and gcc-8, see doc/dependencies.md -export CI_BASE_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python38 python38-pip which patch lbzip2 xz procps-ng dash rsync coreutils bison" +export CI_IMAGE_NAME_TAG="quay.io/centos/amd64:stream9" +export CI_BASE_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python3 python3-pip which patch lbzip2 xz procps-ng dash rsync coreutils bison util-linux e2fsprogs cmake" export PIP_PACKAGES="pyzmq" export GOAL="install" -export NO_WERROR=1 # GCC 8 +export NO_WERROR=1 # Suppress error: #warning _FORTIFY_SOURCE > 2 is treated like 2 on this platform [-Werror=cpp] export BITCOIN_CONFIG="--enable-zmq --enable-reduce-exports" export CONFIG_SHELL="/bin/dash" -export TEST_RUNNER_ENV="LC_ALL=en_US.UTF-8" diff --git a/ci/test/00_setup_env_i686_multiprocess.sh b/ci/test/00_setup_env_i686_multiprocess.sh index c598ee50424bd..f31de86ca1105 100755 --- a/ci/test/00_setup_env_i686_multiprocess.sh +++ b/ci/test/00_setup_env_i686_multiprocess.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# Copyright (c) 2020-2022 The Bitcoin Core developers +# Copyright (c) 2020-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
@@ -8,10 +8,10 @@ export LC_ALL=C.UTF-8 export HOST=i686-pc-linux-gnu export CONTAINER_NAME=ci_i686_multiprocess -export CI_IMAGE_NAME_TAG="docker.io/amd64/ubuntu:20.04" -export PACKAGES="cmake python3 llvm clang g++-multilib" +export CI_IMAGE_NAME_TAG="docker.io/amd64/ubuntu:22.04" +export PACKAGES="llvm clang g++-multilib" export DEP_OPTS="DEBUG=1 MULTIPROCESS=1" export GOAL="install" -export BITCOIN_CONFIG="--enable-debug CC='clang -m32' CXX='clang++ -m32' LDFLAGS='--rtlib=compiler-rt -lgcc_s'" -export TEST_RUNNER_ENV="BITCOIND=navcoin-node" -export TEST_RUNNER_EXTRA="--nosandbox" +export BITCOIN_CONFIG="--enable-debug CC='clang -m32' CXX='clang++ -m32' \ +CPPFLAGS='-DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE'" +export BITCOIND=navcoin-node # Used in functional tests diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh deleted file mode 100755 index e454282b695e7..0000000000000 --- a/ci/test/00_setup_env_mac.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2019-2021 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C.UTF-8 - -export CONTAINER_NAME=ci_macos_cross -export CI_IMAGE_NAME_TAG=ubuntu:22.04 # Check that Jammy can cross-compile to macos -export HOST=x86_64-apple-darwin -export PACKAGES="cmake libz-dev libtinfo5 python3-setuptools xorriso zip" -export XCODE_VERSION=12.2 -export XCODE_BUILD_ID=12B45b -export RUN_UNIT_TESTS=false -export RUN_FUNCTIONAL_TESTS=false -export GOAL="deploy" -export BITCOIN_CONFIG="--enable-reduce-exports LDFLAGS=-Wno-error=unused-command-line-argument" diff --git a/ci/test/00_setup_env_mac_cross.sh b/ci/test/00_setup_env_mac_cross.sh new file mode 100755 index 0000000000000..69a114c31427b --- /dev/null +++ b/ci/test/00_setup_env_mac_cross.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export SDK_URL=${SDK_URL:-https://bitcoincore.org/depends-sources/sdks} + +export CONTAINER_NAME=ci_macos_cross +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:22.04" +export HOST=x86_64-apple-darwin +export PACKAGES="zip" +export XCODE_VERSION=15.0 +export XCODE_BUILD_ID=15A240d +export RUN_UNIT_TESTS=false +export RUN_FUNCTIONAL_TESTS=false +export GOAL="deploy" +export BITCOIN_CONFIG="--enable-reduce-exports" diff --git a/ci/test/00_setup_env_mac_native_arm64.sh b/ci/test/00_setup_env_mac_native.sh similarity index 72% rename from ci/test/00_setup_env_mac_native_arm64.sh rename to ci/test/00_setup_env_mac_native.sh index ceaf2ce821d5b..cd5be896d9e89 100755 --- a/ci/test/00_setup_env_mac_native_arm64.sh +++ b/ci/test/00_setup_env_mac_native.sh @@ -1,16 +1,17 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2022 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
export LC_ALL=C.UTF-8 -export HOST=arm64-apple-darwin +export HOST=x86_64-apple-darwin export PIP_PACKAGES="zmq" export GOAL="install" export BITCOIN_CONFIG="--with-miniupnpc --with-natpmp --enable-reduce-exports" export CI_OS_NAME="macos" export NO_DEPENDS=1 export OSX_SDK="" -export CCACHE_SIZE=300M +export CCACHE_MAXSIZE=400M +export RUN_FUZZ_TESTS=true diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh index 028b54c9e05eb..c5f6171e6790a 100755 --- a/ci/test/00_setup_env_native_asan.sh +++ b/ci/test/00_setup_env_native_asan.sh @@ -1,21 +1,26 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2022 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C.UTF-8 +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" # Only install BCC tracing packages in Cirrus CI. if [[ "${CIRRUS_CI}" == "true" ]]; then - export BPFCC_PACKAGE="bpfcc-tools" + BPFCC_PACKAGE="bpfcc-tools linux-headers-$(uname --kernel-release)" + export CI_CONTAINER_CAP="--privileged -v /sys/kernel:/sys/kernel:rw" else - export BPFCC_PACKAGE="" + BPFCC_PACKAGE="" + export CI_CONTAINER_CAP="--cap-add SYS_PTRACE" # If run with (ASan + LSan), the container needs access to ptrace (https://github.com/google/sanitizers/issues/764) fi export CONTAINER_NAME=ci_native_asan -export PACKAGES="systemtap-sdt-dev clang-16 llvm-16 libclang-rt-16-dev python3-zmq libevent-dev bsdmainutils libboost-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libsqlite3-dev ${BPFCC_PACKAGE}" -export CI_IMAGE_NAME_TAG=ubuntu:23.04 # Version 23.04 will reach EOL in Jan 2024, and can be replaced by "ubuntu:24.04" (or anything else that ships the wanted clang version). +export PACKAGES="systemtap-sdt-dev clang-17 llvm-17 libclang-rt-17-dev python3-zmq libevent-dev libboost-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libqrencode-dev libsqlite3-dev ${BPFCC_PACKAGE}" export NO_DEPENDS=1 export GOAL="install" -export BITCOIN_CONFIG="--enable-c++20 --enable-usdt --enable-zmq --with-incompatible-bdb CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=address,integer,undefined CC=clang-16 CXX=clang++-16" +export BITCOIN_CONFIG="--enable-c++20 --enable-usdt --enable-zmq --with-incompatible-bdb \ +CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' \ +--with-sanitizers=address,float-divide-by-zero,integer,undefined \ +CC='clang-17 -ftrivial-auto-var-init=pattern' CXX='clang++-17 -ftrivial-auto-var-init=pattern'" diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh index fce8794071069..abee3c154169a 100755 --- a/ci/test/00_setup_env_native_fuzz.sh +++ b/ci/test/00_setup_env_native_fuzz.sh @@ -1,18 +1,21 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2023 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C.UTF-8 -export CI_IMAGE_NAME_TAG="ubuntu:23.04" # Version 23.04 will reach EOL in Jan 2024, and can be replaced by "ubuntu:24.04" (or anything else that ships the wanted clang version). 
+export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" export CONTAINER_NAME=ci_native_fuzz -export PACKAGES="clang-16 llvm-16 libclang-rt-16-dev python3 libevent-dev bsdmainutils libboost-dev libsqlite3-dev" +export PACKAGES="clang-17 llvm-17 libclang-rt-17-dev libevent-dev libboost-dev libsqlite3-dev" export NO_DEPENDS=1 export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false export RUN_FUZZ_TESTS=true export GOAL="install" -export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined,integer CC='clang-16 -ftrivial-auto-var-init=pattern' CXX='clang++-16 -ftrivial-auto-var-init=pattern'" -export CCACHE_SIZE=200M +export CI_CONTAINER_CAP="--cap-add SYS_PTRACE" # If run with (ASan + LSan), the container needs access to ptrace (https://github.com/google/sanitizers/issues/764) +export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined,float-divide-by-zero,integer \ +CC='clang-17 -ftrivial-auto-var-init=pattern' CXX='clang++-17 -ftrivial-auto-var-init=pattern'" +export CCACHE_MAXSIZE=200M +export LLVM_SYMBOLIZER_PATH="/usr/bin/llvm-symbolizer-17" diff --git a/ci/test/00_setup_env_native_fuzz_with_msan.sh b/ci/test/00_setup_env_native_fuzz_with_msan.sh index 9cea5b84de4a7..06697aa440296 100755 --- a/ci/test/00_setup_env_native_fuzz_with_msan.sh +++ b/ci/test/00_setup_env_native_fuzz_with_msan.sh @@ -1,25 +1,25 @@ #!/usr/bin/env bash # -# Copyright (c) 2020-2023 The Bitcoin Core developers +# Copyright (c) 2020-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C.UTF-8 -export CI_IMAGE_NAME_TAG="ubuntu:23.04" # Version 23.04 will reach EOL in Jan 2024, and can be replaced by "ubuntu:24.04" (or anything else that ships the wanted clang version). 
-LIBCXX_DIR="${BASE_SCRATCH_DIR}/msan/build/" +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:22.04" +LIBCXX_DIR="/msan/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" -LIBCXX_FLAGS="-nostdinc++ -stdlib=libc++ -L${LIBCXX_DIR}lib -lc++abi -I${LIBCXX_DIR}include -I${LIBCXX_DIR}include/c++/v1 -lpthread -Wl,-rpath,${LIBCXX_DIR}lib -Wno-unused-command-line-argument" +LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" export CONTAINER_NAME="ci_native_fuzz_msan" -export PACKAGES="clang-16 llvm-16 libclang-rt-16-dev cmake" +export PACKAGES="ninja-build" # BDB generates false-positives and will be removed in future -export DEP_OPTS="NO_BDB=1 CC='clang' CXX='clang++' CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" +export DEP_OPTS="NO_BDB=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" export GOAL="install" -export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,memory --disable-hardening --with-asm=no CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" +export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,memory --disable-hardening --with-asm=no CFLAGS='${MSAN_FLAGS}' CPPFLAGS='-DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" export USE_MEMORY_SANITIZER="true" export RUN_UNIT_TESTS="false" export RUN_FUNCTIONAL_TESTS="false" export RUN_FUZZ_TESTS=true -export CCACHE_SIZE=250M +export CCACHE_MAXSIZE=250M diff --git a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh index 3511f65000564..1f60c4680368c 100755 --- a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh +++ b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh @@ -1,14 +1,14 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2023 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
export LC_ALL=C.UTF-8 -export CI_IMAGE_NAME_TAG="debian:bookworm" +export CI_IMAGE_NAME_TAG="docker.io/debian:bookworm" export CONTAINER_NAME=ci_native_fuzz_valgrind -export PACKAGES="clang llvm libclang-rt-dev python3 libevent-dev bsdmainutils libboost-dev libsqlite3-dev valgrind" +export PACKAGES="clang llvm libclang-rt-dev libevent-dev libboost-dev libsqlite3-dev valgrind" export NO_DEPENDS=1 export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false @@ -16,5 +16,5 @@ export RUN_FUZZ_TESTS=true export FUZZ_TESTS_CONFIG="--valgrind" export GOAL="install" # Temporarily pin dwarf 4, until using Valgrind 3.20 or later -export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer CC=clang CXX=clang++ CFLAGS='-gdwarf-4' CXXFLAGS='-gdwarf-4'" -export CCACHE_SIZE=200M +export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer CC='clang -gdwarf-4' CXX='clang++ -gdwarf-4'" +export CCACHE_MAXSIZE=200M diff --git a/ci/test/00_setup_env_native_libblsct_only.sh b/ci/test/00_setup_env_native_libblsct_only.sh new file mode 100755 index 0000000000000..550cb580afcaa --- /dev/null +++ b/ci/test/00_setup_env_native_libblsct_only.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2024-present The Navcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +export CONTAINER_NAME=ci_native_libblsct_only +export CI_IMAGE_NAME_TAG="docker.io/debian:bullseye" +export PACKAGES="clang-13 llvm-13 libc++abi-13-dev libc++-13-dev" +export DEP_OPTS="NO_WALLET=1 CC=clang-13 CXX='clang++-13 -stdlib=libc++'" +export GOAL="install" +export RUN_UNIT_TESTS=false +export RUN_FUNCTIONAL_TESTS=false +export BITCOIN_CONFIG="--enable-build-libblsct-only" diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh index 03690e757d5d8..fa98282145007 100755 --- a/ci/test/00_setup_env_native_msan.sh +++ b/ci/test/00_setup_env_native_msan.sh @@ -1,23 +1,23 @@ #!/usr/bin/env bash # -# Copyright (c) 2020-2023 The Bitcoin Core developers +# Copyright (c) 2020-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C.UTF-8 -export CI_IMAGE_NAME_TAG="ubuntu:23.04" # Version 23.04 will reach EOL in Jan 2024, and can be replaced by "ubuntu:24.04" (or anything else that ships the wanted clang version). 
-LIBCXX_DIR="${BASE_SCRATCH_DIR}/msan/build/" -export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls -fsanitize-blacklist=${BASE_ROOT_DIR}/test/sanitizer_suppressions/msan" -LIBCXX_FLAGS="-nostdinc++ -stdlib=libc++ -L${LIBCXX_DIR}lib -lc++abi -I${LIBCXX_DIR}include -I${LIBCXX_DIR}include/c++/v1 -lpthread -Wl,-rpath,${LIBCXX_DIR}lib -Wno-unused-command-line-argument" +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:22.04" +LIBCXX_DIR="/msan/cxx_build/" +export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" +LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" export CONTAINER_NAME="ci_native_msan" -export PACKAGES="clang-16 llvm-16 libclang-rt-16-dev cmake" +export PACKAGES="ninja-build" # BDB generates false-positives and will be removed in future -export DEP_OPTS="NO_BDB=1 CC='clang' CXX='clang++' CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" +export DEP_OPTS="NO_BDB=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" export GOAL="install" -export BITCOIN_CONFIG="--with-sanitizers=memory --disable-hardening --with-asm=no CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" +export BITCOIN_CONFIG="--with-sanitizers=memory --disable-hardening --with-asm=no CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" export USE_MEMORY_SANITIZER="true" export RUN_FUNCTIONAL_TESTS="false" -export CCACHE_SIZE=250M +export CCACHE_MAXSIZE=250M diff --git a/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh b/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh index 73e9ea1fc5799..6f0b9cc285072 100755 --- a/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh +++ b/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh @@ -1,16 +1,15 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2022 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_nowallet_libbitcoinkernel -export CI_IMAGE_NAME_TAG="docker.io/ubuntu:20.04" -# Use minimum supported python3.8 and clang-8, see doc/dependencies.md -export PACKAGES="python3-zmq clang-8 llvm-8 libc++abi-8-dev libc++-8-dev" -export DEP_OPTS="NO_WALLET=1 CC=clang-8 CXX='clang++-8 -stdlib=libc++'" +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:22.04" +# Use minimum supported python3.9 (or best-effort 3.10) and clang-14, see doc/dependencies.md +export PACKAGES="python3-zmq clang-14 llvm-14 libc++abi-14-dev libc++-14-dev" +export DEP_OPTS="NO_WALLET=1 CC=clang-14 CXX='clang++-14 -stdlib=libc++'" export GOAL="install" -export NO_WERROR=1 -export BITCOIN_CONFIG="--enable-reduce-exports CC=clang-8 CXX='clang++-8 -stdlib=libc++' --enable-experimental-util-chainstate --with-experimental-kernel-lib --enable-shared" +export BITCOIN_CONFIG="--enable-reduce-exports --enable-experimental-util-chainstate --with-experimental-kernel-lib --enable-shared" diff --git a/ci/test/00_setup_env_native.sh b/ci/test/00_setup_env_native_previous_releases.sh similarity index 53% rename from ci/test/00_setup_env_native.sh rename to ci/test/00_setup_env_native_previous_releases.sh index a025ef6d54096..a6d1505d920ab 100755 --- a/ci/test/00_setup_env_native.sh +++ b/ci/test/00_setup_env_native_previous_releases.sh @@ -1,21 +1,20 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2022 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C.UTF-8 -export CONTAINER_NAME=ci_native -export CI_IMAGE_NAME_TAG="ubuntu:20.04" -# Use minimum supported python3.8 and gcc-8 (or best-effort gcc-9), see doc/dependencies.md -export PACKAGES="gcc-9 g++-9 python3-zmq libdbus-1-dev libharfbuzz-dev" -export DEP_OPTS="NO_UPNP=1 NO_NATPMP=1 DEBUG=1 ALLOW_HOST_PACKAGES=1 CC=gcc-9 CXX=g++-9" +export CONTAINER_NAME=ci_native_previous_releases +export CI_IMAGE_NAME_TAG="docker.io/debian:bullseye" +# Use minimum supported python3.9 and gcc-10, see doc/dependencies.md +export PACKAGES="gcc-10 g++-10 python3-zmq" +export DEP_OPTS="NO_UPNP=1 NO_NATPMP=1 DEBUG=1 CC=gcc-10 CXX=g++-10" export TEST_RUNNER_EXTRA="--previous-releases --coverage --extended --exclude feature_dbcrash" # Run extended tests so that coverage does not fail, but exclude the very slow dbcrash export RUN_UNIT_TESTS_SEQUENTIAL="true" export RUN_UNIT_TESTS="false" export GOAL="install" -export NO_WERROR=1 export DOWNLOAD_PREVIOUS_RELEASES="true" -export BITCOIN_CONFIG="--enable-zmq --with-libs=no --enable-reduce-exports \ ---enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\"" +export BITCOIN_CONFIG="--enable-zmq --with-libs=no --enable-reduce-exports --enable-debug \ +CFLAGS=\"-g0 -O2 -funsigned-char\" CPPFLAGS='-DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE' CXXFLAGS=\"-g0 -O2 -funsigned-char\"" diff --git a/ci/test/00_setup_env_native_tidy.sh b/ci/test/00_setup_env_native_tidy.sh index ee9e6810312b9..2b3ab57e33443 100755 --- a/ci/test/00_setup_env_native_tidy.sh +++ b/ci/test/00_setup_env_native_tidy.sh @@ -1,19 +1,20 @@ #!/usr/bin/env bash # -# Copyright (c) 2023 The Bitcoin Core developers +# Copyright (c) 2023-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
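[Editor's note] `DEP_OPTS` is likewise only consumed later: `ci/test/03_test_script.sh` appends it to the depends build. For the kernel-only job above, the effective command is roughly the following sketch (the `HOST` triplet and job count are illustrative):

```bash
# Approximate depends invocation generated from DEP_OPTS for this job:
make -j"$(nproc)" -C depends HOST=x86_64-pc-linux-gnu LOG=1 \
    NO_WALLET=1 CC=clang-14 CXX='clang++-14 -stdlib=libc++'
```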
export LC_ALL=C.UTF-8 -export CI_IMAGE_NAME_TAG="ubuntu:lunar" # Version 23.04 will reach EOL in Jan 2024, and can be replaced by "ubuntu:24.04" (or anything else that ships the wanted clang version). +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" export CONTAINER_NAME=ci_native_tidy -export PACKAGES="clang-16 libclang-16-dev llvm-16-dev libomp-16-dev clang-tidy-16 bear cmake libevent-dev libboost-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev systemtap-sdt-dev libsqlite3-dev libdb++-dev" +export TIDY_LLVM_V="17" +export PACKAGES="clang-${TIDY_LLVM_V} libclang-${TIDY_LLVM_V}-dev llvm-${TIDY_LLVM_V}-dev libomp-${TIDY_LLVM_V}-dev clang-tidy-${TIDY_LLVM_V} jq bear libevent-dev libboost-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev systemtap-sdt-dev libsqlite3-dev libdb++-dev" export NO_DEPENDS=1 export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false export RUN_FUZZ_TESTS=false export RUN_TIDY=true export GOAL="install" -export BITCOIN_CONFIG="CC=clang-16 CXX=clang++-16 --with-incompatible-bdb --disable-hardening CFLAGS='-O0 -g0' CXXFLAGS='-O0 -g0 -I/usr/lib/llvm-16/lib/clang/16/include'" -export CCACHE_SIZE=200M +export BITCOIN_CONFIG="CC=clang-${TIDY_LLVM_V} CXX=clang++-${TIDY_LLVM_V} --with-incompatible-bdb --disable-hardening CFLAGS='-O0 -g0' CXXFLAGS='-O0 -g0 -I/usr/lib/llvm-${TIDY_LLVM_V}/lib/clang/${TIDY_LLVM_V}/include'" +export CCACHE_MAXSIZE=200M diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh index 8ebb1fa563ccd..aa23bad8091fd 100755 --- a/ci/test/00_setup_env_native_tsan.sh +++ b/ci/test/00_setup_env_native_tsan.sh @@ -1,14 +1,14 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2023 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_tsan -export CI_IMAGE_NAME_TAG=ubuntu:23.04 # Version 23.04 will reach EOL in Jan 2024, and can be replaced by "ubuntu:24.04" (or anything else that ships the wanted clang version). -export PACKAGES="clang-16 llvm-16 libclang-rt-16-dev libc++abi-16-dev libc++-16-dev python3-zmq" -export DEP_OPTS="CC=clang-16 CXX='clang++-16 -stdlib=libc++'" +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" +export PACKAGES="clang-17 llvm-17 libclang-rt-17-dev libc++abi-17-dev libc++-17-dev python3-zmq" +export DEP_OPTS="CC=clang-17 CXX='clang++-17 -stdlib=libc++'" export GOAL="install" export BITCOIN_CONFIG="--enable-zmq CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER -DDEBUG_LOCKCONTENTION' CXXFLAGS='-g' --with-sanitizers=thread" diff --git a/ci/test/00_setup_env_native_valgrind.sh b/ci/test/00_setup_env_native_valgrind.sh index 2f2a715a5675f..7f90aabe04d9d 100755 --- a/ci/test/00_setup_env_native_valgrind.sh +++ b/ci/test/00_setup_env_native_valgrind.sh @@ -1,17 +1,17 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2022 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
export LC_ALL=C.UTF-8 -export CI_IMAGE_NAME_TAG="debian:bookworm" +export CI_IMAGE_NAME_TAG="docker.io/debian:bookworm" export CONTAINER_NAME=ci_native_valgrind -export PACKAGES="valgrind clang llvm libclang-rt-dev python3-zmq libevent-dev bsdmainutils libboost-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libsqlite3-dev" +export PACKAGES="valgrind clang llvm libclang-rt-dev python3-zmq libevent-dev libboost-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libsqlite3-dev" export USE_VALGRIND=1 export NO_DEPENDS=1 -export TEST_RUNNER_EXTRA="--nosandbox --exclude feature_init,rpc_bind,feature_bind_extra" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547 +export TEST_RUNNER_EXTRA="--exclude feature_init,rpc_bind,feature_bind_extra" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547 export GOAL="install" # Temporarily pin dwarf 4, until using Valgrind 3.20 or later -export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb CC=clang CXX=clang++ CFLAGS='-gdwarf-4' CXXFLAGS='-gdwarf-4'" +export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb CC='clang -gdwarf-4' CXX='clang++ -gdwarf-4'" diff --git a/ci/test/00_setup_env_s390x.sh b/ci/test/00_setup_env_s390x.sh index 523e81c94a2da..ca84ecce5153c 100755 --- a/ci/test/00_setup_env_s390x.sh +++ b/ci/test/00_setup_env_s390x.sh @@ -1,25 +1,15 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2022 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C.UTF-8 export HOST=s390x-linux-gnu -# The host arch is unknown, so we run the tests through qemu. -# If the host is s390x and wants to run the tests natively, it can set QEMU_USER_CMD to the empty string. -if [ -z ${QEMU_USER_CMD+x} ]; then export QEMU_USER_CMD="${QEMU_USER_CMD:-"qemu-s390x"}"; fi export PACKAGES="python3-zmq" -if [ -n "$QEMU_USER_CMD" ]; then - # Likely cross-compiling, so install the needed gcc and qemu-user - export DPKG_ADD_ARCH="s390x" - export PACKAGES="$PACKAGES g++-s390x-linux-gnu qemu-user libc6:s390x libstdc++6:s390x" -fi -# Use debian to avoid 404 apt errors export CONTAINER_NAME=ci_s390x -export CI_IMAGE_NAME_TAG="debian:bookworm" -export TEST_RUNNER_ENV="LC_ALL=C" +export CI_IMAGE_NAME_TAG="docker.io/s390x/debian:bookworm" export TEST_RUNNER_EXTRA="--exclude feature_init,rpc_bind,feature_bind_extra" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547 export RUN_FUNCTIONAL_TESTS=true export GOAL="install" diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh index 9bf4d1948344b..b85521b4f2a4c 100755 --- a/ci/test/00_setup_env_win64.sh +++ b/ci/test/00_setup_env_win64.sh @@ -1,16 +1,19 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2023 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_win64 -export CI_IMAGE_NAME_TAG=ubuntu:22.04 # Check that Jammy can cross-compile to win64 +export CI_IMAGE_NAME_TAG="docker.io/amd64/ubuntu:22.04" # Check that Jammy can cross-compile to win64 export HOST=x86_64-w64-mingw32 export DPKG_ADD_ARCH="i386" -export PACKAGES="python3 g++-mingw-w64-x86-64-posix wine-binfmt wine64 wine32 file" +export PACKAGES="nsis g++-mingw-w64-x86-64-posix wine-binfmt wine64 wine32 file" export RUN_FUNCTIONAL_TESTS=false export GOAL="install" -export BITCOIN_CONFIG="--enable-reduce-exports --enable-external-signer" +# Prior to 11.0.0, the mingw-w64 headers were missing noreturn attributes, causing warnings when +# cross-compiling for Windows. https://sourceforge.net/p/mingw-w64/bugs/306/ +# https://github.com/mingw-w64/mingw-w64/commit/1690994f515910a31b9fb7c7bd3a52d4ba987abe +export BITCOIN_CONFIG="--enable-reduce-exports CXXFLAGS=-Wno-return-type" diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh index 641ff964f33ab..b15df4b6cc860 100755 --- a/ci/test/01_base_install.sh +++ b/ci/test/01_base_install.sh @@ -6,6 +6,8 @@ export LC_ALL=C.UTF-8 +set -ex + CFG_DONE="ci.base-install-done" # Use a global git setting to remember whether this script ran to avoid running it twice if [ "$(git config --global ${CFG_DONE})" == "true" ]; then @@ -18,9 +20,9 @@ if [ -n "$DPKG_ADD_ARCH" ]; then fi if [[ $CI_IMAGE_NAME_TAG == *centos* ]]; then - ${CI_RETRY_EXE} bash -c "dnf -y install epel-release" - ${CI_RETRY_EXE} bash -c "dnf -y --allowerasing install $CI_BASE_PACKAGES $PACKAGES" -elif [ "$CI_USE_APT_INSTALL" != "no" ]; then + bash -c "dnf -y install epel-release" + bash -c "dnf -y --allowerasing install $CI_BASE_PACKAGES $PACKAGES" +elif [ "$CI_OS_NAME" != "macos" ]; then if [[ -n "${APPEND_APT_SOURCES_LIST}" ]]; then echo "${APPEND_APT_SOURCES_LIST}" >> /etc/apt/sources.list fi @@ -29,28 +31,45 @@ elif [ "$CI_USE_APT_INSTALL" != "no" ]; then fi if [ -n "$PIP_PACKAGES" ]; then - if [ "$CI_OS_NAME" == "macos" ]; then - sudo -H pip3 install --upgrade pip - # shellcheck disable=SC2086 - IN_GETOPT_BIN="$(brew --prefix gnu-getopt)/bin/getopt" ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES - else - # shellcheck disable=SC2086 - ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES - fi + # shellcheck disable=SC2086 + ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES fi if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then - update-alternatives --install /usr/bin/clang++ clang++ "$(which clang++-16)" 100 - update-alternatives --install /usr/bin/clang clang "$(which clang-16)" 100 - git clone --depth=1 https://github.com/llvm/llvm-project -b llvmorg-16.0.1 "${BASE_SCRATCH_DIR}"/msan/llvm-project - cmake -B "${BASE_SCRATCH_DIR}"/msan/build/ -DLLVM_ENABLE_RUNTIMES='libcxx;libcxxabi' -DCMAKE_BUILD_TYPE=Release -DLLVM_USE_SANITIZER=MemoryWithOrigins -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DLLVM_TARGETS_TO_BUILD=X86 -DLLVM_ENABLE_PER_TARGET_RUNTIME_DIR=OFF -DLIBCXX_ENABLE_DEBUG_MODE=ON -DLIBCXX_ENABLE_ASSERTIONS=ON -S "${BASE_SCRATCH_DIR}"/msan/llvm-project/runtimes - make -C "${BASE_SCRATCH_DIR}"/msan/build/ "$MAKEJOBS" + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b llvmorg-17.0.2 /msan/llvm-project + + cmake -G Ninja -B /msan/clang_build/ \ + -DLLVM_ENABLE_PROJECTS="clang" \ + -DCMAKE_BUILD_TYPE=Release \ + -DLLVM_TARGETS_TO_BUILD=Native \ + -DLLVM_ENABLE_RUNTIMES="compiler-rt;libcxx;libcxxabi;libunwind" \ + -S /msan/llvm-project/llvm + + ninja -C /msan/clang_build/ "-j$( nproc 
)" # Use nproc, because MAKEJOBS is the default in docker image builds + ninja -C /msan/clang_build/ install-runtimes + + update-alternatives --install /usr/bin/clang++ clang++ /msan/clang_build/bin/clang++ 100 + update-alternatives --install /usr/bin/clang clang /msan/clang_build/bin/clang 100 + update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /msan/clang_build/bin/llvm-symbolizer 100 + + cmake -G Ninja -B /msan/cxx_build/ \ + -DLLVM_ENABLE_RUNTIMES='libcxx;libcxxabi' \ + -DCMAKE_BUILD_TYPE=Release \ + -DLLVM_USE_SANITIZER=MemoryWithOrigins \ + -DCMAKE_C_COMPILER=clang \ + -DCMAKE_CXX_COMPILER=clang++ \ + -DLLVM_TARGETS_TO_BUILD=Native \ + -DLLVM_ENABLE_PER_TARGET_RUNTIME_DIR=OFF \ + -DLIBCXX_HARDENING_MODE=debug \ + -S /msan/llvm-project/runtimes + + ninja -C /msan/cxx_build/ "-j$( nproc )" # Use nproc, because MAKEJOBS is the default in docker image builds fi if [[ "${RUN_TIDY}" == "true" ]]; then - git clone --depth=1 https://github.com/include-what-you-use/include-what-you-use -b clang_16 "${DIR_IWYU}"/include-what-you-use - cmake -B "${DIR_IWYU}"/build/ -G 'Unix Makefiles' -DCMAKE_PREFIX_PATH=/usr/lib/llvm-16 -S "${DIR_IWYU}"/include-what-you-use - make -C "${DIR_IWYU}"/build/ install "$MAKEJOBS" + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/include-what-you-use/include-what-you-use -b clang_"${TIDY_LLVM_V}" /include-what-you-use + cmake -B /iwyu-build/ -G 'Unix Makefiles' -DCMAKE_PREFIX_PATH=/usr/lib/llvm-"${TIDY_LLVM_V}" -S /include-what-you-use + make -C /iwyu-build/ install "-j$( nproc )" # Use nproc, because MAKEJOBS is the default in docker image builds fi mkdir -p "${DEPENDS_DIR}/SDKs" "${DEPENDS_DIR}/sdk-sources" @@ -61,7 +80,7 @@ if [ -n "$XCODE_VERSION" ] && [ ! -d "${DEPENDS_DIR}/SDKs/${OSX_SDK_BASENAME}" ] OSX_SDK_FILENAME="${OSX_SDK_BASENAME}.tar.gz" OSX_SDK_PATH="${DEPENDS_DIR}/sdk-sources/${OSX_SDK_FILENAME}" if [ ! -f "$OSX_SDK_PATH" ]; then - curl --location --fail "${SDK_URL}/${OSX_SDK_FILENAME}" -o "$OSX_SDK_PATH" + ${CI_RETRY_EXE} curl --location --fail "${SDK_URL}/${OSX_SDK_FILENAME}" -o "$OSX_SDK_PATH" fi tar -C "${DEPENDS_DIR}/SDKs" -xf "$OSX_SDK_PATH" fi @@ -69,11 +88,11 @@ fi if [ -n "$ANDROID_HOME" ] && [ ! -d "$ANDROID_HOME" ]; then ANDROID_TOOLS_PATH=${DEPENDS_DIR}/sdk-sources/android-tools.zip if [ ! -f "$ANDROID_TOOLS_PATH" ]; then - curl --location --fail "${ANDROID_TOOLS_URL}" -o "$ANDROID_TOOLS_PATH" + ${CI_RETRY_EXE} curl --location --fail "${ANDROID_TOOLS_URL}" -o "$ANDROID_TOOLS_PATH" fi mkdir -p "$ANDROID_HOME" unzip -o "$ANDROID_TOOLS_PATH" -d "$ANDROID_HOME" - yes | "${ANDROID_HOME}"/cmdline-tools/bin/sdkmanager --sdk_root="${ANDROID_HOME}" --install "build-tools;${ANDROID_BUILD_TOOLS_VERSION}" "platform-tools" "platforms;android-${ANDROID_API_LEVEL}" "ndk;${ANDROID_NDK_VERSION}" + yes | "${ANDROID_HOME}"/cmdline-tools/bin/sdkmanager --sdk_root="${ANDROID_HOME}" --install "build-tools;${ANDROID_BUILD_TOOLS_VERSION}" "platform-tools" "platforms;android-31" "platforms;android-${ANDROID_API_LEVEL}" "ndk;${ANDROID_NDK_VERSION}" fi git config --global ${CFG_DONE} "true" diff --git a/ci/test/02_run_container.sh b/ci/test/02_run_container.sh new file mode 100755 index 0000000000000..8a7a978994673 --- /dev/null +++ b/ci/test/02_run_container.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2018-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +export LC_ALL=C.UTF-8 +export CI_IMAGE_LABEL="bitcoin-ci-test" + +set -ex + +if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then + # Export all env vars to avoid missing some. + # Though, exclude those with newlines to avoid parsing problems. + python3 -c 'import os; [print(f"{key}={value}") for key, value in os.environ.items() if "\n" not in value and "HOME" != key and "PATH" != key and "USER" != key]' | tee /tmp/env + # System-dependent env vars must be kept as is. So read them from the container. + docker run --rm "${CI_IMAGE_NAME_TAG}" bash -c "env | grep --extended-regexp '^(HOME|PATH|USER)='" | tee --append /tmp/env + echo "Creating $CI_IMAGE_NAME_TAG container to run in" + DOCKER_BUILDKIT=1 docker build \ + --file "${BASE_READ_ONLY_DIR}/ci/test_imagefile" \ + --build-arg "CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG}" \ + --build-arg "FILE_ENV=${FILE_ENV}" \ + --label="${CI_IMAGE_LABEL}" \ + --tag="${CONTAINER_NAME}" \ + "${BASE_READ_ONLY_DIR}" + docker volume create "${CONTAINER_NAME}_ccache" || true + docker volume create "${CONTAINER_NAME}_depends" || true + docker volume create "${CONTAINER_NAME}_depends_sources" || true + docker volume create "${CONTAINER_NAME}_depends_SDKs_android" || true + docker volume create "${CONTAINER_NAME}_previous_releases" || true + + if [ -n "${RESTART_CI_DOCKER_BEFORE_RUN}" ] ; then + echo "Restart docker before run to stop and clear all containers started with --rm" + podman container rm --force --all # Similar to "systemctl restart docker" + + # Still prune everything in case the filtered pruning doesn't work, or if labels were not set + # on a previous run. Belt and suspenders approach, should be fine to remove in the future. + # Prune images used by --external containers (e.g. build containers) when + # using podman. + echo "Prune all dangling images" + podman image prune --force --external + fi + echo "Prune all dangling $CI_IMAGE_LABEL images" + # When detecting podman-docker, `--external` should be added. 
+ docker image prune --force --filter "label=$CI_IMAGE_LABEL" + + # shellcheck disable=SC2086 + CI_CONTAINER_ID=$(docker run --cap-add LINUX_IMMUTABLE $CI_CONTAINER_CAP --rm --interactive --detach --tty \ + --mount "type=bind,src=$BASE_READ_ONLY_DIR,dst=$BASE_READ_ONLY_DIR,readonly" \ + --mount "type=volume,src=${CONTAINER_NAME}_ccache,dst=$CCACHE_DIR" \ + --mount "type=volume,src=${CONTAINER_NAME}_depends,dst=$DEPENDS_DIR/built" \ + --mount "type=volume,src=${CONTAINER_NAME}_depends_sources,dst=$DEPENDS_DIR/sources" \ + --mount "type=volume,src=${CONTAINER_NAME}_depends_SDKs_android,dst=$DEPENDS_DIR/SDKs/android" \ + --mount "type=volume,src=${CONTAINER_NAME}_previous_releases,dst=$PREVIOUS_RELEASES_DIR" \ + --env-file /tmp/env \ + --name "$CONTAINER_NAME" \ + "$CONTAINER_NAME") + export CI_CONTAINER_ID + export CI_EXEC_CMD_PREFIX="docker exec ${CI_CONTAINER_ID}" +else + echo "Running on host system without docker wrapper" + echo "Create missing folders" + mkdir -p "${CCACHE_DIR}" + mkdir -p "${PREVIOUS_RELEASES_DIR}" +fi + +if [ "$CI_OS_NAME" == "macos" ]; then + IN_GETOPT_BIN="$(brew --prefix gnu-getopt)/bin/getopt" + export IN_GETOPT_BIN +fi + +CI_EXEC () { + $CI_EXEC_CMD_PREFIX bash -c "export PATH=\"/path_with space:${BINS_SCRATCH_DIR}:${BASE_ROOT_DIR}/ci/retry:\$PATH\" && cd \"${BASE_ROOT_DIR}\" && $*" +} +export -f CI_EXEC + +# Normalize all folders to BASE_ROOT_DIR +CI_EXEC rsync --archive --stats --human-readable "${BASE_READ_ONLY_DIR}/" "${BASE_ROOT_DIR}" || echo "Nothing to copy from ${BASE_READ_ONLY_DIR}/" +CI_EXEC "${BASE_ROOT_DIR}/ci/test/01_base_install.sh" + +# Fixes permission issues when there is a container UID/GID mismatch with the owner +# of the git source code directory. +CI_EXEC git config --global --add safe.directory \"*\" + +CI_EXEC mkdir -p "${BINS_SCRATCH_DIR}" + +CI_EXEC "${BASE_ROOT_DIR}/ci/test/03_test_script.sh" + +if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then + echo "Stop and remove CI container by ID" + docker container kill "${CI_CONTAINER_ID}" +fi diff --git a/ci/test/03_test_script.sh b/ci/test/03_test_script.sh new file mode 100755 index 0000000000000..80fb4f1dc5f01 --- /dev/null +++ b/ci/test/03_test_script.sh @@ -0,0 +1,206 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2018-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +export LC_ALL=C.UTF-8 + +set -ex + +export ASAN_OPTIONS="detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1" +export LSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/lsan" +export TSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/tsan:halt_on_error=1:log_path=${BASE_SCRATCH_DIR}/sanitizer-output/tsan" +export UBSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1" + +if [ "$CI_OS_NAME" == "macos" ]; then + top -l 1 -s 0 | awk ' /PhysMem/ {print}' + echo "Number of CPUs: $(sysctl -n hw.logicalcpu)" +else + free -m -h + echo "Number of CPUs (nproc): $(nproc)" + echo "System info: $(uname --kernel-name --kernel-release)" + lscpu +fi +echo "Free disk space:" +df -h + +# What host to compile for. See also ./depends/README.md +# Tests that need cross-compilation export the appropriate HOST. 
+# Tests that run natively guess the host +export HOST=${HOST:-$("$BASE_ROOT_DIR/depends/config.guess")} + +( + # compact->outputs[i].file_size is uninitialized memory, so reading it is UB. + # The statistic bytes_written is only used for logging, which is disabled in + # CI, so as a temporary minimal fix to work around UB and CI failures, leave + # bytes_written unmodified. + # See https://github.com/bitcoin/bitcoin/pull/28359#issuecomment-1698694748 + echo 'diff --git a/src/leveldb/db/db_impl.cc b/src/leveldb/db/db_impl.cc +index 65e31724bc..f61b471953 100644 +--- a/src/leveldb/db/db_impl.cc ++++ b/src/leveldb/db/db_impl.cc +@@ -1028,9 +1028,6 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) { + stats.bytes_read += compact->compaction->input(which, i)->file_size; + } + } +- for (size_t i = 0; i < compact->outputs.size(); i++) { +- stats.bytes_written += compact->outputs[i].file_size; +- } + + mutex_.Lock(); + stats_[compact->compaction->level() + 1].Add(stats);' | patch -p1 + git diff +) + +if [ "$RUN_FUZZ_TESTS" = "true" ]; then + export DIR_FUZZ_IN=${DIR_QA_ASSETS}/fuzz_seed_corpus/ + if [ ! -d "$DIR_FUZZ_IN" ]; then + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/bitcoin-core/qa-assets "${DIR_QA_ASSETS}" + fi + ( + cd "${DIR_QA_ASSETS}" + echo "Using qa-assets repo from commit ..." + git log -1 + ) +elif [ "$RUN_UNIT_TESTS" = "true" ] || [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then + export DIR_UNIT_TEST_DATA=${DIR_QA_ASSETS}/unit_test_data/ + if [ ! -d "$DIR_UNIT_TEST_DATA" ]; then + mkdir -p "$DIR_UNIT_TEST_DATA" + ${CI_RETRY_EXE} curl --location --fail https://github.com/bitcoin-core/qa-assets/raw/main/unit_test_data/script_assets_test.json -o "${DIR_UNIT_TEST_DATA}/script_assets_test.json" + fi +fi + +mkdir -p "${BASE_SCRATCH_DIR}/sanitizer-output/" + +if [ "$USE_BUSY_BOX" = "true" ]; then + echo "Setup to use BusyBox utils" + # tar excluded for now because it requires passing in the exact archive type in ./depends (fixed in later BusyBox version) + # ar excluded for now because it does not recognize the -q option in ./depends (unknown if fixed) + for util in $(busybox --list | grep -v "^ar$" | grep -v "^tar$" ); do ln -s "$(command -v busybox)" "${BINS_SCRATCH_DIR}/$util"; done + # Print BusyBox version + patch --help +fi + +# Make sure default datadir does not exist and is never read by creating a dummy file +if [ "$CI_OS_NAME" == "macos" ]; then + echo > "${HOME}/Library/Application Support/Navcoin" +else + echo > "${HOME}/.navcoin" +fi + +if [ -z "$NO_DEPENDS" ]; then + if [[ $CI_IMAGE_NAME_TAG == *centos* ]]; then + SHELL_OPTS="CONFIG_SHELL=/bin/dash" + else + SHELL_OPTS="CONFIG_SHELL=" + fi + bash -c "$SHELL_OPTS make $MAKEJOBS -C depends HOST=$HOST $DEP_OPTS LOG=1" +fi +if [ "$DOWNLOAD_PREVIOUS_RELEASES" = "true" ]; then + test/get_previous_releases.py -b -t "$PREVIOUS_RELEASES_DIR" +fi + +BITCOIN_CONFIG_ALL="--disable-dependency-tracking" +if [ -z "$NO_DEPENDS" ]; then + BITCOIN_CONFIG_ALL="${BITCOIN_CONFIG_ALL} CONFIG_SITE=$DEPENDS_DIR/$HOST/share/config.site" +fi +if [ -z "$NO_WERROR" ]; then + BITCOIN_CONFIG_ALL="${BITCOIN_CONFIG_ALL} --enable-werror" +fi + +ccache --zero-stats +PRINT_CCACHE_STATISTICS="ccache --version | head -n 1 && ccache --show-stats" + +if [ -n "$ANDROID_TOOLS_URL" ]; then + make distclean || true + ./autogen.sh + bash -c "./configure $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG" || ( (cat config.log) && false) + make "${MAKEJOBS}" && cd src/qt && ANDROID_HOME=${ANDROID_HOME} ANDROID_NDK_HOME=${ANDROID_NDK_HOME} make apk + bash -c 
"${PRINT_CCACHE_STATISTICS}" + exit 0 +fi + +BITCOIN_CONFIG_ALL="${BITCOIN_CONFIG_ALL} --enable-external-signer --prefix=$BASE_OUTDIR" + +if [ -n "$CONFIG_SHELL" ]; then + "$CONFIG_SHELL" -c "./autogen.sh" +else + ./autogen.sh +fi + +mkdir -p "${BASE_BUILD_DIR}" +cd "${BASE_BUILD_DIR}" + +bash -c "${BASE_ROOT_DIR}/configure --cache-file=config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG" || ( (cat config.log) && false) + +make distdir VERSION="$HOST" + +cd "${BASE_BUILD_DIR}/navcoin-$HOST" + +bash -c "./configure --cache-file=../config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG" || ( (cat config.log) && false) + +if [[ "${RUN_TIDY}" == "true" ]]; then + MAYBE_BEAR="bear --config src/.bear-tidy-config" + MAYBE_TOKEN="--" +fi + +bash -c "${MAYBE_BEAR} ${MAYBE_TOKEN} make $MAKEJOBS $GOAL" || ( echo "Build failure. Verbose build follows." && make "$GOAL" V=1 ; false ) + +bash -c "${PRINT_CCACHE_STATISTICS}" +du -sh "${DEPENDS_DIR}"/*/ +du -sh "${PREVIOUS_RELEASES_DIR}" + +if [[ $HOST = *-mingw32 ]]; then + # Generate all binaries, so that they can be wrapped + make "$MAKEJOBS" -C src/secp256k1 VERBOSE=1 + make "$MAKEJOBS" -C src minisketch/test.exe VERBOSE=1 + "${BASE_ROOT_DIR}/ci/test/wrap-wine.sh" +fi + +if [ -n "$USE_VALGRIND" ]; then + "${BASE_ROOT_DIR}/ci/test/wrap-valgrind.sh" +fi + +if [ "$RUN_UNIT_TESTS" = "true" ]; then + DIR_UNIT_TEST_DATA="${DIR_UNIT_TEST_DATA}" LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" make "${MAKEJOBS}" check VERBOSE=1 +fi + +if [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then + DIR_UNIT_TEST_DATA="${DIR_UNIT_TEST_DATA}" LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" "${BASE_OUTDIR}"/bin/test_navcoin --catch_system_errors=no -l test_suite +fi + +if [ "$RUN_FUNCTIONAL_TESTS" = "true" ]; then + # shellcheck disable=SC2086 + LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" test/functional/test_runner.py --ci "${MAKEJOBS}" --tmpdirprefix "${BASE_SCRATCH_DIR}"/test_runner/ --ansi --combinedlogslen=99999999 --timeout-factor="${TEST_RUNNER_TIMEOUT_FACTOR}" ${TEST_RUNNER_EXTRA} --quiet --failfast +fi + +if [ "${RUN_TIDY}" = "true" ]; then + cmake -B /tidy-build -DLLVM_DIR=/usr/lib/llvm-"${TIDY_LLVM_V}"/cmake -DCMAKE_BUILD_TYPE=Release -S "${BASE_ROOT_DIR}"/contrib/devtools/bitcoin-tidy + cmake --build /tidy-build "$MAKEJOBS" + cmake --build /tidy-build --target bitcoin-tidy-tests "$MAKEJOBS" + + set -eo pipefail + cd "${BASE_BUILD_DIR}/navcoin-$HOST/src/" + ( run-clang-tidy-"${TIDY_LLVM_V}" -quiet -load="/tidy-build/libbitcoin-tidy.so" "${MAKEJOBS}" ) | grep -C5 "error" + # Filter out files by regex here, because regex may not be + # accepted in src/.bear-tidy-config + # Filter out: + # * qt qrc and moc generated files + jq 'map(select(.file | test("src/qt/qrc_.*\\.cpp$|/moc_.*\\.cpp$") | not))' ../compile_commands.json > tmp.json + mv tmp.json ../compile_commands.json + cd "${BASE_BUILD_DIR}/navcoin-$HOST/" + python3 "/include-what-you-use/iwyu_tool.py" \ + -p . 
"${MAKEJOBS}" \ + -- -Xiwyu --cxx17ns -Xiwyu --mapping_file="${BASE_BUILD_DIR}/navcoin-$HOST/contrib/devtools/iwyu/bitcoin.core.imp" \ + -Xiwyu --max_line_length=160 \ + 2>&1 | tee /tmp/iwyu_ci.out + cd "${BASE_ROOT_DIR}/src" + python3 "/include-what-you-use/fix_includes.py" --nosafe_headers < /tmp/iwyu_ci.out + git --no-pager diff +fi + +if [ "$RUN_FUZZ_TESTS" = "true" ]; then + # shellcheck disable=SC2086 + LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" test/fuzz/test_runner.py ${FUZZ_TESTS_CONFIG} "${MAKEJOBS}" -l DEBUG "${DIR_FUZZ_IN}" --empty_min_time=60 +fi diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh deleted file mode 100755 index 5ab6a7f9820e6..0000000000000 --- a/ci/test/04_install.sh +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2018-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C.UTF-8 - -if [[ $QEMU_USER_CMD == qemu-s390* ]]; then - export LC_ALL=C -fi - -# Create folders that are mounted into the docker -mkdir -p "${CCACHE_DIR}" -mkdir -p "${PREVIOUS_RELEASES_DIR}" - -export ASAN_OPTIONS="detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1" -export LSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/lsan" -export TSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/tsan:halt_on_error=1:log_path=${BASE_SCRATCH_DIR}/sanitizer-output/tsan" -export UBSAN_OPTIONS="suppressions=${BASE_ROOT_DIR}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1" -if [[ $BITCOIN_CONFIG = *--with-sanitizers=*address* ]]; then # If ran with (ASan + LSan), Docker needs access to ptrace (https://github.com/google/sanitizers/issues/764) - CI_CONTAINER_CAP="--cap-add SYS_PTRACE" -fi - -export P_CI_DIR="$PWD" -export BINS_SCRATCH_DIR="${BASE_SCRATCH_DIR}/bins/" - -if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then - # Export all env vars to avoid missing some. - # Though, exclude those with newlines to avoid parsing problems. 
- python3 -c 'import os; [print(f"{key}={value}") for key, value in os.environ.items() if "\n" not in value]' | tee /tmp/env - echo "Creating $CI_IMAGE_NAME_TAG container to run in" - DOCKER_BUILDKIT=1 ${CI_RETRY_EXE} docker build \ - --file "${BASE_ROOT_DIR}/ci/test_imagefile" \ - --build-arg "CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG}" \ - --build-arg "FILE_ENV=${FILE_ENV}" \ - --tag="${CONTAINER_NAME}" \ - "${BASE_ROOT_DIR}" - docker volume create "${CONTAINER_NAME}_ccache" || true - docker volume create "${CONTAINER_NAME}_depends" || true - docker volume create "${CONTAINER_NAME}_previous_releases" || true - - if [ -n "${RESTART_CI_DOCKER_BEFORE_RUN}" ] ; then - echo "Prune stopped containers" - podman container prune -f - echo "Prune all dangling images" - docker image prune --force - fi - - # shellcheck disable=SC2086 - CI_CONTAINER_ID=$(docker run $CI_CONTAINER_CAP --rm --interactive --detach --tty \ - --mount type=bind,src=$BASE_ROOT_DIR,dst=/ro_base,readonly \ - --mount "type=volume,src=${CONTAINER_NAME}_ccache,dst=$CCACHE_DIR" \ - --mount "type=volume,src=${CONTAINER_NAME}_depends,dst=$DEPENDS_DIR" \ - --mount "type=volume,src=${CONTAINER_NAME}_previous_releases,dst=$PREVIOUS_RELEASES_DIR" \ - -w $BASE_ROOT_DIR \ - --env-file /tmp/env \ - --name $CONTAINER_NAME \ - $CONTAINER_NAME) - export CI_CONTAINER_ID - export CI_EXEC_CMD_PREFIX="docker exec ${CI_CONTAINER_ID}" -else - echo "Running on host system without docker wrapper" -fi - -CI_EXEC () { - $CI_EXEC_CMD_PREFIX bash -c "export PATH=${BINS_SCRATCH_DIR}:\$PATH && cd \"$P_CI_DIR\" && $*" -} -export -f CI_EXEC - -CI_EXEC rsync --archive --stats --human-readable /ci_base_install/ "${BASE_ROOT_DIR}" || echo "/ci_base_install/ missing" -CI_EXEC "${BASE_ROOT_DIR}/ci/test/01_base_install.sh" -CI_EXEC rsync --archive --stats --human-readable /ro_base/ "${BASE_ROOT_DIR}" || echo "Nothing to copy from ro_base" -# Fixes permission issues when there is a container UID/GID mismatch with the owner -# of the git source code directory. -CI_EXEC git config --global --add safe.directory \"*\" - -CI_EXEC mkdir -p "${BINS_SCRATCH_DIR}" - -if [ "$CI_OS_NAME" == "macos" ]; then - top -l 1 -s 0 | awk ' /PhysMem/ {print}' - echo "Number of CPUs: $(sysctl -n hw.logicalcpu)" -else - CI_EXEC free -m -h - CI_EXEC echo "Number of CPUs \(nproc\):" \$\(nproc\) - CI_EXEC echo "$(lscpu | grep Endian)" -fi -CI_EXEC echo "Free disk space:" -CI_EXEC df -h - -if [ "$RUN_FUZZ_TESTS" = "true" ]; then - export DIR_FUZZ_IN=${DIR_QA_ASSETS}/fuzz_seed_corpus/ - if [ ! -d "$DIR_FUZZ_IN" ]; then - CI_EXEC git clone --depth=1 https://github.com/navcoin/qa-assets "${DIR_QA_ASSETS}" - fi -elif [ "$RUN_UNIT_TESTS" = "true" ] || [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then - export DIR_UNIT_TEST_DATA=${DIR_QA_ASSETS}/unit_test_data/ - if [ ! 
-d "$DIR_UNIT_TEST_DATA" ]; then - CI_EXEC mkdir -p "$DIR_UNIT_TEST_DATA" - CI_EXEC curl --location --fail https://github.com/navcoin/qa-assets/raw/main/unit_test_data/script_assets_test.json -o "${DIR_UNIT_TEST_DATA}/script_assets_test.json" - fi -fi - -CI_EXEC mkdir -p "${BASE_SCRATCH_DIR}/sanitizer-output/" - -if [ "$USE_BUSY_BOX" = "true" ]; then - echo "Setup to use BusyBox utils" - # tar excluded for now because it requires passing in the exact archive type in ./depends (fixed in later BusyBox version) - # ar excluded for now because it does not recognize the -q option in ./depends (unknown if fixed) - # shellcheck disable=SC1010 - CI_EXEC for util in \$\(busybox --list \| grep -v "^ar$" \| grep -v "^tar$" \)\; do ln -s \$\(command -v busybox\) "${BINS_SCRATCH_DIR}/\$util"\; done - # Print BusyBox version - CI_EXEC patch --help -fi diff --git a/ci/test/05_before_script.sh b/ci/test/05_before_script.sh deleted file mode 100755 index 199cdd64a7c6c..0000000000000 --- a/ci/test/05_before_script.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2018-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C.UTF-8 - -# Make sure default datadir does not exist and is never read by creating a dummy file -if [ "$CI_OS_NAME" == "macos" ]; then - echo > "${HOME}/Library/Application Support/Bitcoin" -else - CI_EXEC echo \> \$HOME/.bitcoin -fi - -if [ -z "$NO_DEPENDS" ]; then - if [[ $CI_IMAGE_NAME_TAG == *centos* ]]; then - # CentOS has problems building the depends if the config shell is not explicitly set - # (i.e. for libevent a Makefile with an empty SHELL variable is generated, leading to - # an error as the first command is executed) - SHELL_OPTS="LC_ALL=en_US.UTF-8 CONFIG_SHELL=/bin/dash" - else - SHELL_OPTS="CONFIG_SHELL=" - fi - CI_EXEC "$SHELL_OPTS" make "$MAKEJOBS" -C depends HOST="$HOST" "$DEP_OPTS" LOG=1 -fi -if [ "$DOWNLOAD_PREVIOUS_RELEASES" = "true" ]; then - CI_EXEC test/get_previous_releases.py -b -t "$PREVIOUS_RELEASES_DIR" -fi diff --git a/ci/test/06_script_a.sh b/ci/test/06_script_a.sh deleted file mode 100755 index ed6b04807b414..0000000000000 --- a/ci/test/06_script_a.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2018-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -export LC_ALL=C.UTF-8 - -BITCOIN_CONFIG_ALL="--enable-suppress-external-warnings --disable-dependency-tracking" -if [ -z "$NO_DEPENDS" ]; then - BITCOIN_CONFIG_ALL="${BITCOIN_CONFIG_ALL} CONFIG_SITE=$DEPENDS_DIR/$HOST/share/config.site" -fi -if [ -z "$NO_WERROR" ]; then - BITCOIN_CONFIG_ALL="${BITCOIN_CONFIG_ALL} --enable-werror" -fi - -CI_EXEC "ccache --zero-stats --max-size=$CCACHE_SIZE" -PRINT_CCACHE_STATISTICS="ccache --version | head -n 1 && ccache --show-stats" - -if [ -n "$ANDROID_TOOLS_URL" ]; then - CI_EXEC make distclean || true - CI_EXEC ./autogen.sh - CI_EXEC ./configure "$BITCOIN_CONFIG_ALL" "$BITCOIN_CONFIG" || ( (CI_EXEC cat config.log) && false) - CI_EXEC "make $MAKEJOBS" - CI_EXEC "${PRINT_CCACHE_STATISTICS}" - exit 0 -fi - -BITCOIN_CONFIG_ALL="${BITCOIN_CONFIG_ALL} --enable-external-signer --prefix=$BASE_OUTDIR" - -if [ -n "$CONFIG_SHELL" ]; then - CI_EXEC "$CONFIG_SHELL" -c "./autogen.sh" -else - CI_EXEC ./autogen.sh -fi - -CI_EXEC mkdir -p "${BASE_BUILD_DIR}" -export P_CI_DIR="${BASE_BUILD_DIR}" - -CI_EXEC "${BASE_ROOT_DIR}/configure" --cache-file=config.cache "$BITCOIN_CONFIG_ALL" "$BITCOIN_CONFIG" || ( (CI_EXEC cat config.log) && false) - -CI_EXEC make distdir VERSION="$HOST" - -export P_CI_DIR="${BASE_BUILD_DIR}/navcoin-$HOST" - -CI_EXEC ./configure --cache-file=../config.cache "$BITCOIN_CONFIG_ALL" "$BITCOIN_CONFIG" || ( (CI_EXEC cat config.log) && false) - -set -o errtrace -trap 'CI_EXEC "cat ${BASE_SCRATCH_DIR}/sanitizer-output/* 2> /dev/null"' ERR - -if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then - # MemorySanitizer (MSAN) does not support tracking memory initialization done by - # using the Linux getrandom syscall. Avoid using getrandom by undefining - # HAVE_SYS_GETRANDOM. See https://github.com/google/sanitizers/issues/852 for - # details. - CI_EXEC 'grep -v HAVE_SYS_GETRANDOM src/config/bitcoin-config.h > src/config/bitcoin-config.h.tmp && mv src/config/bitcoin-config.h.tmp src/config/bitcoin-config.h' -fi - -if [[ "${RUN_TIDY}" == "true" ]]; then - MAYBE_BEAR="bear --config src/.bear-tidy-config" - MAYBE_TOKEN="--" -fi - -CI_EXEC "${MAYBE_BEAR}" "${MAYBE_TOKEN}" make "$MAKEJOBS" "$GOAL" || ( echo "Build failure. Verbose build follows." && CI_EXEC make "$GOAL" V=1 ; false ) - -CI_EXEC "${PRINT_CCACHE_STATISTICS}" -CI_EXEC du -sh "${DEPENDS_DIR}"/*/ -CI_EXEC du -sh "${PREVIOUS_RELEASES_DIR}" diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh deleted file mode 100755 index 6326dfb9d6ddb..0000000000000 --- a/ci/test/06_script_b.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2018-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -export LC_ALL=C.UTF-8 - -set -ex - -if [[ $HOST = *-mingw32 ]]; then - # Generate all binaries, so that they can be wrapped - make "$MAKEJOBS" -C src/secp256k1 VERBOSE=1 - make "$MAKEJOBS" -C src minisketch/test.exe VERBOSE=1 - "${BASE_ROOT_DIR}/ci/test/wrap-wine.sh" -fi - -if [ -n "$QEMU_USER_CMD" ]; then - # Generate all binaries, so that they can be wrapped - make "$MAKEJOBS" -C src/secp256k1 VERBOSE=1 - make "$MAKEJOBS" -C src minisketch/test VERBOSE=1 - "${BASE_ROOT_DIR}/ci/test/wrap-qemu.sh" -fi - -if [ -n "$USE_VALGRIND" ]; then - "${BASE_ROOT_DIR}/ci/test/wrap-valgrind.sh" -fi - -if [ "$RUN_UNIT_TESTS" = "true" ]; then - bash -c "${TEST_RUNNER_ENV} DIR_UNIT_TEST_DATA=${DIR_UNIT_TEST_DATA} LD_LIBRARY_PATH=${DEPENDS_DIR}/${HOST}/lib make $MAKEJOBS check VERBOSE=1" -fi - -if [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then - bash -c "${TEST_RUNNER_ENV} DIR_UNIT_TEST_DATA=${DIR_UNIT_TEST_DATA} LD_LIBRARY_PATH=${DEPENDS_DIR}/${HOST}/lib ${BASE_OUTDIR}/bin/test_navcoin --catch_system_errors=no -l test_suite" -fi - -if [ "$RUN_FUNCTIONAL_TESTS" = "true" ]; then - bash -c "LD_LIBRARY_PATH=${DEPENDS_DIR}/${HOST}/lib ${TEST_RUNNER_ENV} test/functional/test_runner.py --ci $MAKEJOBS --tmpdirprefix ${BASE_SCRATCH_DIR}/test_runner/ --ansi --combinedlogslen=99999999 --timeout-factor=${TEST_RUNNER_TIMEOUT_FACTOR} ${TEST_RUNNER_EXTRA} --quiet --failfast" -fi - -if [ "${RUN_TIDY}" = "true" ]; then - set -eo pipefail - cd "${BASE_BUILD_DIR}/navcoin-$HOST/src/" - ( run-clang-tidy-16 -quiet "${MAKEJOBS}" ) | grep -C5 "error" - cd "${BASE_BUILD_DIR}/navcoin-$HOST/" - python3 "${DIR_IWYU}/include-what-you-use/iwyu_tool.py" \ - src/common/args.cpp \ - src/common/config.cpp \ - src/common/init.cpp \ - src/common/url.cpp \ - src/compat \ - src/dbwrapper.cpp \ - src/init \ - src/kernel \ - src/node/chainstate.cpp \ - src/node/chainstatemanager_args.cpp \ - src/node/mempool_args.cpp \ - src/node/minisketchwrapper.cpp \ - src/node/utxo_snapshot.cpp \ - src/node/validation_cache_args.cpp \ - src/policy/feerate.cpp \ - src/policy/packages.cpp \ - src/policy/settings.cpp \ - src/primitives/transaction.cpp \ - src/random.cpp \ - src/rpc/fees.cpp \ - src/rpc/signmessage.cpp \ - src/test/fuzz/string.cpp \ - src/test/fuzz/txorphan.cpp \ - src/test/fuzz/util \ - src/test/util/coins.cpp \ - src/uint256.cpp \ - src/util/bip32.cpp \ - src/util/bytevectorhash.cpp \ - src/util/check.cpp \ - src/util/error.cpp \ - src/util/exception.cpp \ - src/util/getuniquepath.cpp \ - src/util/hasher.cpp \ - src/util/message.cpp \ - src/util/moneystr.cpp \ - src/util/serfloat.cpp \ - src/util/spanparsing.cpp \ - src/util/strencodings.cpp \ - src/util/string.cpp \ - src/util/syserror.cpp \ - src/util/threadinterrupt.cpp \ - src/zmq \ - -p . 
"${MAKEJOBS}" \ - -- -Xiwyu --cxx17ns -Xiwyu --mapping_file="${BASE_BUILD_DIR}/navcoin-$HOST/contrib/devtools/iwyu/bitcoin.core.imp" \ - 2>&1 | tee /tmp/iwyu_ci.out - cd "${BASE_ROOT_DIR}/src" - python3 "${DIR_IWYU}/include-what-you-use/fix_includes.py" --nosafe_headers < /tmp/iwyu_ci.out - git --no-pager diff -fi - -if [ "$RUN_SECURITY_TESTS" = "true" ]; then - make test-security-check -fi - -if [ "$RUN_FUZZ_TESTS" = "true" ]; then - bash -c "LD_LIBRARY_PATH=${DEPENDS_DIR}/${HOST}/lib test/fuzz/test_runner.py ${FUZZ_TESTS_CONFIG} $MAKEJOBS -l DEBUG ${DIR_FUZZ_IN}" -fi diff --git a/ci/test/wrap-qemu.sh b/ci/test/wrap-qemu.sh deleted file mode 100755 index e028ede37888f..0000000000000 --- a/ci/test/wrap-qemu.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2018-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C.UTF-8 - -for b_name in {"${BASE_OUTDIR}/bin"/*,src/secp256k1/*tests,src/minisketch/test{,-verify},src/univalue/{test_json,unitester,object}}; do - # shellcheck disable=SC2044 - for b in $(find "${BASE_ROOT_DIR}" -executable -type f -name "$(basename "$b_name")"); do - echo "Wrap $b ..." - mv "$b" "${b}_orig" - echo '#!/usr/bin/env bash' > "$b" - echo "$QEMU_USER_CMD \"${b}_orig\" \"\$@\"" >> "$b" - chmod +x "$b" - done -done diff --git a/ci/test/wrapped-cl.bat b/ci/test/wrapped-cl.bat deleted file mode 100644 index fc2a604c580be..0000000000000 --- a/ci/test/wrapped-cl.bat +++ /dev/null @@ -1 +0,0 @@ -ccache cl %* diff --git a/ci/test_imagefile b/ci/test_imagefile index 4854708d1a9b2..f8b5eea1c88ab 100644 --- a/ci/test_imagefile +++ b/ci/test_imagefile @@ -1,3 +1,9 @@ +# Copyright (c) The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +# See ci/README.md for usage. + ARG CI_IMAGE_NAME_TAG FROM ${CI_IMAGE_NAME_TAG} @@ -5,6 +11,6 @@ ARG FILE_ENV ENV FILE_ENV=${FILE_ENV} COPY ./ci/retry/retry /usr/bin/retry -COPY ./ci/test/00_setup_env.sh ./${FILE_ENV} ./ci/test/01_base_install.sh /ci_base_install/ci/test/ +COPY ./ci/test/00_setup_env.sh ./${FILE_ENV} ./ci/test/01_base_install.sh /ci_container_base/ci/test/ -RUN ["bash", "-c", "cd /ci_base_install/ && set -o errexit && source ./ci/test/00_setup_env.sh && ./ci/test/01_base_install.sh"] +RUN ["bash", "-c", "cd /ci_container_base/ && set -o errexit && source ./ci/test/00_setup_env.sh && ./ci/test/01_base_install.sh"] diff --git a/ci/test_run_all.sh b/ci/test_run_all.sh index 751a4056df7bc..3afc47b23edae 100755 --- a/ci/test_run_all.sh +++ b/ci/test_run_all.sh @@ -1,19 +1,11 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2020 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
export LC_ALL=C.UTF-8 set -o errexit; source ./ci/test/00_setup_env.sh -set -o errexit; source ./ci/test/04_install.sh -set -o errexit; source ./ci/test/05_before_script.sh -set -o errexit; source ./ci/test/06_script_a.sh set -o errexit -CI_EXEC "${BASE_ROOT_DIR}/ci/test/06_script_b.sh" - -if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then - echo "Stop and remove CI container by ID" - docker container kill "${CI_CONTAINER_ID}" -fi +"./ci/test/02_run_container.sh" diff --git a/configure.ac b/configure.ac index 34291ebf2b971..8f394157e2b07 100644 --- a/configure.ac +++ b/configure.ac @@ -1,15 +1,15 @@ AC_PREREQ([2.69]) -define(_CLIENT_VERSION_MAJOR, 25) +define(_CLIENT_VERSION_MAJOR, 26) define(_CLIENT_VERSION_MINOR, 99) define(_CLIENT_VERSION_BUILD, 0) define(_CLIENT_VERSION_RC, 0) define(_CLIENT_VERSION_IS_RELEASE, false) -define(_COPYRIGHT_YEAR, 2023) +define(_COPYRIGHT_YEAR, 2024) define(_COPYRIGHT_HOLDERS,[The %s developers]) -define(_COPYRIGHT_HOLDERS_SUBSTITUTION,[[Bitcoin Core]]) +define(_COPYRIGHT_HOLDERS_SUBSTITUTION,[[Bitcoin Core, Navcoin Core]]) AC_INIT([Navcoin Core],m4_join([.], _CLIENT_VERSION_MAJOR, _CLIENT_VERSION_MINOR, _CLIENT_VERSION_BUILD)m4_if(_CLIENT_VERSION_RC, [0], [], [rc]_CLIENT_VERSION_RC),[https://github.com/navcoin/navcoin/issues],[navcoin],[https://navcoin.org/]) AC_CONFIG_SRCDIR([src/validation.cpp]) -AC_CONFIG_HEADERS([src/config/bitcoin-config.h]) +AC_CONFIG_HEADERS([src/config/navcoin-config.h]) AC_CONFIG_AUX_DIR([build-aux]) AC_CONFIG_MACRO_DIR([build-aux/m4]) @@ -29,15 +29,15 @@ if test -n "$PKG_CONFIG_LIBDIR"; then PKG_CONFIG="env PKG_CONFIG_LIBDIR=$PKG_CONFIG_LIBDIR $PKG_CONFIG" fi -BITCOIN_DAEMON_NAME=navcoind -BITCOIN_TEST_NAME=test_navcoin -BITCOIN_CLI_NAME=navcoin-cli -BITCOIN_TX_NAME=navcoin-tx -BITCOIN_UTIL_NAME=navcoin-util -BITCOIN_CHAINSTATE_NAME=navcoin-chainstate -BITCOIN_WALLET_TOOL_NAME=navcoin-wallet +NAVCOIN_DAEMON_NAME=navcoind +NAVCOIN_TEST_NAME=test_navcoin +NAVCOIN_CLI_NAME=navcoin-cli +NAVCOIN_TX_NAME=navcoin-tx +NAVCOIN_UTIL_NAME=navcoin-util +NAVCOIN_CHAINSTATE_NAME=navcoin-chainstate +NAVCOIN_WALLET_TOOL_NAME=navcoin-wallet dnl Multi Process -BITCOIN_MP_NODE_NAME=navcoin-node +NAVCOIN_MP_NODE_NAME=navcoin-node dnl Unless the user specified ARFLAGS, force it to be cr dnl This is also the default as-of libtool 2.4.7 @@ -48,9 +48,9 @@ fi AC_CANONICAL_HOST -AH_TOP([#ifndef BITCOIN_CONFIG_H]) -AH_TOP([#define BITCOIN_CONFIG_H]) -AH_BOTTOM([#endif //BITCOIN_CONFIG_H]) +AH_TOP([#ifndef NAVCOIN_CONFIG_H]) +AH_TOP([#define NAVCOIN_CONFIG_H]) +AH_BOTTOM([#endif //NAVCOIN_CONFIG_H]) dnl Automake init set-up and checks AM_INIT_AUTOMAKE([1.13 no-define subdir-objects foreign]) @@ -68,11 +68,12 @@ else fi AC_PROG_CXX -dnl By default, libtool for mingw refuses to link static libs into a dll for -dnl fear of mixing pic/non-pic objects, and import/export complications. Since -dnl we have those under control, re-enable that functionality. +dnl libtool overrides case $host in *mingw*) + dnl By default, libtool for mingw refuses to link static libs into a dll for + dnl fear of mixing pic/non-pic objects, and import/export complications. Since + dnl we have those under control, re-enable that functionality. lt_cv_deplibs_check_method="pass_all" dnl Remove unwanted -DDLL_EXPORT from these variables. 
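[Editor's note] With the stage scripts (04/05/06) deleted, the trimmed-down `ci/test_run_all.sh` shown at the start of this hunk reduces to two steps; condensed into a standalone sketch:

```bash
# Control flow of the new entry point (condensed from ci/test_run_all.sh and
# ci/test/02_run_container.sh above):
set -o errexit
source ./ci/test/00_setup_env.sh   # merge the FILE_ENV selection into the environment
./ci/test/02_run_container.sh      # build the image, start the container, then run
                                   # 01_base_install.sh and 03_test_script.sh inside it
```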
@@ -81,29 +82,20 @@ case $host in lt_cv_prog_compiler_pic="-DPIC" lt_cv_prog_compiler_pic_CXX="-DPIC" ;; + *darwin*) + dnl Because it prints a verbose warning, lld fails the following check + dnl for "-Wl,-single_module" from libtool.m4: + dnl # If there is a non-empty error log, and "single_module" + dnl # appears in it, assume the flag caused a linker warning + dnl "-single_module" works fine on ld64 and lld, so just bypass the test. + dnl Failure to set this to "yes" causes libtool to use a very broken + dnl link-line for shared libs. + lt_cv_apple_cc_single_mod="yes" + ;; esac -AC_ARG_WITH([seccomp], - [AS_HELP_STRING([--with-seccomp], - [enable experimental syscall sandbox feature (-sandbox), default is yes if seccomp-bpf is detected under Linux x86_64])], - [seccomp_found=$withval], - [seccomp_found=auto]) - -AC_ARG_ENABLE([c++20], - [AS_HELP_STRING([--enable-c++20], - [enable compilation in c++20 mode (disabled by default)])], - [use_cxx20=$enableval], - [use_cxx20=no]) - -dnl Require C++17 compiler (no GNU extensions) -if test "$use_cxx20" = "no"; then -AX_CXX_COMPILE_STDCXX([17], [noext], [mandatory]) -else +dnl Require C++20 compiler (no GNU extensions) AX_CXX_COMPILE_STDCXX([20], [noext], [mandatory]) -fi - -dnl check if additional link flags are required for std::filesystem -CHECK_FILESYSTEM dnl Unless the user specified OBJCXX, force it to be the same as CXX. This ensures dnl that we get the same -std flags for both. @@ -124,8 +116,8 @@ AC_PATH_TOOL([AR], [ar]) AC_PATH_TOOL([GCOV], [gcov]) AC_PATH_TOOL([LLVM_COV], [llvm-cov]) AC_PATH_PROG([LCOV], [lcov]) -dnl Python 3.8 is specified in .python-version and should be used if available, see doc/dependencies.md -AC_PATH_PROGS([PYTHON], [python3.8 python3.9 python3.10 python3.11 python3.12 python3 python]) +dnl The minimum supported version is specified in .python-version and should be used if available, see doc/dependencies.md +AC_PATH_PROGS([PYTHON], [python3.9 python3.10 python3.11 python3.12 python3 python]) AC_PATH_PROG([GENHTML], [genhtml]) AC_PATH_PROG([GIT], [git]) AC_PATH_PROG([CCACHE], [ccache]) @@ -135,8 +127,6 @@ AC_PATH_TOOL([OBJCOPY], [objcopy]) AC_PATH_PROG([DOXYGEN], [doxygen]) AM_CONDITIONAL([HAVE_DOXYGEN], [test -n "$DOXYGEN"]) -AC_ARG_VAR([PYTHONPATH], [Augments the default search path for python module files]) - AC_ARG_ENABLE([wallet], [AS_HELP_STRING([--disable-wallet], [disable wallet (enabled by default)])], @@ -223,10 +213,10 @@ dnl May be useful if warnings from external headers clutter the build output dnl too much, so that it becomes difficult to spot Navcoin Core warnings dnl or if they cause a build failure with --enable-werror. 
AC_ARG_ENABLE([suppress-external-warnings], - [AS_HELP_STRING([--enable-suppress-external-warnings], - [Suppress warnings from external headers (default is no)])], + [AS_HELP_STRING([--disable-suppress-external-warnings], + [Do not suppress warnings from external headers (default is to suppress)])], [suppress_external_warnings=$enableval], - [suppress_external_warnings=no]) + [suppress_external_warnings=yes]) AC_ARG_ENABLE([lcov], [AS_HELP_STRING([--enable-lcov], @@ -318,11 +308,6 @@ AC_ARG_ENABLE([external-signer], [use_external_signer=$enableval], [use_external_signer=auto]) -AC_ARG_ENABLE([lto], - [AS_HELP_STRING([--enable-lto],[build using LTO (default is no)])], - [enable_lto=$enableval], - [enable_lto=no]) - AC_ARG_ENABLE([build-libblsct-only], [AS_HELP_STRING([--enable-build-libblsct-only], [build libblsct.a only and do not build others (default is to build everything)])], @@ -331,6 +316,15 @@ AC_ARG_ENABLE([build-libblsct-only], AC_LANG_PUSH([C++]) +dnl Always set -g -O2 in our CXXFLAGS. Autoconf will try and set CXXFLAGS to "-g -O2" by default, +dnl so we suppress that (if CXXFLAGS hasn't been overridden by the user), given we are adding it +dnl ourselves. +CORE_CXXFLAGS="$CORE_CXXFLAGS -g -O2" + +if test "$CXXFLAGS_overridden" = "no"; then + CXXFLAGS="" +fi + dnl Check for a flag to turn compiler warnings into errors. This is helpful for checks which may dnl appear to succeed because by default they merely emit warnings when they fail. dnl @@ -355,12 +349,6 @@ case $host in esac if test "$enable_debug" = "yes"; then - dnl If debugging is enabled, and the user hasn't overridden CXXFLAGS, clear - dnl them, to prevent autoconfs "-g -O2" being added. Otherwise we'd end up - dnl with "-O0 -g3 -g -O2". - if test "$CXXFLAGS_overridden" = "no"; then - CXXFLAGS="" - fi dnl Disable all optimizations AX_CHECK_COMPILE_FLAG([-O0], [DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -O0"], [], [$CXXFLAG_WERROR]) @@ -380,18 +368,14 @@ if test "$enable_debug" = "yes"; then AX_CHECK_COMPILE_FLAG([-ftrapv], [DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -ftrapv"], [], [$CXXFLAG_WERROR]) fi -if test "$enable_lto" = "yes"; then - AX_CHECK_COMPILE_FLAG([-flto], [LTO_CXXFLAGS="$LTO_CXXFLAGS -flto"], [AC_MSG_ERROR([compile failed with -flto])], [$CXXFLAG_WERROR]) - AX_CHECK_LINK_FLAG([-flto], [LTO_LDFLAGS="$LTO_LDFLAGS -flto"], [AC_MSG_ERROR([link failed with -flto])], [$CXXFLAG_WERROR]) -fi - if test "$use_sanitizers" != ""; then dnl First check if the compiler accepts flags. If an incompatible pair like dnl -fsanitize=address,thread is used here, this check will fail. This will also dnl fail if a bad argument is passed, e.g. -fsanitize=undfeined AX_CHECK_COMPILE_FLAG( [-fsanitize=$use_sanitizers], - [SANITIZER_CXXFLAGS="-fsanitize=$use_sanitizers"], + [SANITIZER_CXXFLAGS="-fsanitize=$use_sanitizers" + SANITIZER_CFLAGS="-fsanitize=$use_sanitizers"], [AC_MSG_ERROR([compiler did not accept requested flags])]) dnl Some compilers (e.g. GCC) require additional libraries like libasan, @@ -418,12 +402,6 @@ if test "$enable_werror" = "yes"; then AC_MSG_ERROR([enable-werror set but -Werror is not usable]) fi ERROR_CXXFLAGS=$CXXFLAG_WERROR - - dnl -Wreturn-type is broken in GCC for MinGW-w64. 
- dnl https://sourceforge.net/p/mingw-w64/bugs/306/ - AX_CHECK_COMPILE_FLAG([-Werror=return-type], [], [ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Wno-error=return-type"], [$CXXFLAG_WERROR], - [AC_LANG_SOURCE([[#include - int f(){ assert(false); }]])]) fi if test "$CXXFLAGS_overridden" = "no"; then @@ -444,12 +422,9 @@ if test "$CXXFLAGS_overridden" = "no"; then AX_CHECK_COMPILE_FLAG([-Wduplicated-cond], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wduplicated-cond"], [], [$CXXFLAG_WERROR]) AX_CHECK_COMPILE_FLAG([-Wlogical-op], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wlogical-op"], [], [$CXXFLAG_WERROR]) AX_CHECK_COMPILE_FLAG([-Woverloaded-virtual], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Woverloaded-virtual"], [], [$CXXFLAG_WERROR]) - dnl -Wsuggest-override is broken with GCC before 9.2 - dnl https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78010 - AX_CHECK_COMPILE_FLAG([-Wsuggest-override], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wsuggest-override"], [], [$CXXFLAG_WERROR], - [AC_LANG_SOURCE([[struct A { virtual void f(); }; struct B : A { void f() final; };]])]) - AX_CHECK_COMPILE_FLAG([-Wunreachable-code-loop-increment], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wunreachable-code-loop-increment"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wsuggest-override], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wsuggest-override"], [], [$CXXFLAG_WERROR]) AX_CHECK_COMPILE_FLAG([-Wimplicit-fallthrough], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wimplicit-fallthrough"], [], [$CXXFLAG_WERROR]) + AX_CHECK_COMPILE_FLAG([-Wunreachable-code], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wunreachable-code"], [], [$CXXFLAG_WERROR]) if test "$suppress_external_warnings" != "no" ; then AX_CHECK_COMPILE_FLAG([-Wdocumentation], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wdocumentation"], [], [$CXXFLAG_WERROR]) @@ -468,6 +443,12 @@ fi dnl Don't allow extended (non-ASCII) symbols in identifiers. This is easier for code review. AX_CHECK_COMPILE_FLAG([-fno-extended-identifiers], [CORE_CXXFLAGS="$CORE_CXXFLAGS -fno-extended-identifiers"], [], [$CXXFLAG_WERROR]) +dnl Currently all versions of gcc are subject to a class of bugs, see the +dnl gccbug_90348 test case (only reproduces on GCC 11 and earlier) and +dnl https://gcc.gnu.org/bugzilla/show_bug.cgi?id=111843. To work around that, set +dnl -fstack-reuse=none for all gcc builds. 
(Only gcc understands this flag) +AX_CHECK_COMPILE_FLAG([-fstack-reuse=none], [CORE_CXXFLAGS="$CORE_CXXFLAGS -fstack-reuse=none"]) + enable_arm_crc=no enable_arm_shani=no enable_sse42=no @@ -575,7 +556,7 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ CXXFLAGS="$TEMP_CXXFLAGS" # ARM -AX_CHECK_COMPILE_FLAG([-march=armv8-a+crc], [ARM_CRC_CXXFLAGS="-march=armv8-a+crc"], [], [$CXXFLAG_WERROR]) +AX_CHECK_COMPILE_FLAG([-march=armv8-a+crc+crypto], [ARM_CRC_CXXFLAGS="-march=armv8-a+crc+crypto"], [], [$CXXFLAG_WERROR]) AX_CHECK_COMPILE_FLAG([-march=armv8-a+crypto], [ARM_SHANI_CXXFLAGS="-march=armv8-a+crypto"], [], [$CXXFLAG_WERROR]) TEMP_CXXFLAGS="$CXXFLAGS" @@ -622,56 +603,56 @@ CORE_CPPFLAGS="$CORE_CPPFLAGS -DHAVE_BUILD_INFO" AC_ARG_WITH([utils], [AS_HELP_STRING([--with-utils], [build navcoin-cli navcoin-tx navcoin-util navcoin-wallet (default=yes)])], - [build_bitcoin_utils=$withval], - [build_bitcoin_utils=yes]) + [build_navcoin_utils=$withval], + [build_navcoin_utils=yes]) AC_ARG_ENABLE([util-cli], [AS_HELP_STRING([--enable-util-cli], [build navcoin-cli])], - [build_bitcoin_cli=$enableval], - [build_bitcoin_cli=$build_bitcoin_utils]) + [build_navcoin_cli=$enableval], + [build_navcoin_cli=$build_navcoin_utils]) AC_ARG_ENABLE([util-tx], [AS_HELP_STRING([--enable-util-tx], [build navcoin-tx])], - [build_bitcoin_tx=$enableval], - [build_bitcoin_tx=$build_bitcoin_utils]) + [build_navcoin_tx=$enableval], + [build_navcoin_tx=$build_navcoin_utils]) AC_ARG_ENABLE([util-wallet], [AS_HELP_STRING([--enable-util-wallet], [build navcoin-wallet])], - [build_bitcoin_wallet=$enableval], - [build_bitcoin_wallet=$build_bitcoin_utils]) + [build_navcoin_wallet=$enableval], + [build_navcoin_wallet=$build_navcoin_utils]) AC_ARG_ENABLE([util-util], [AS_HELP_STRING([--enable-util-util], [build navcoin-util])], - [build_bitcoin_util=$enableval], - [build_bitcoin_util=$build_bitcoin_utils]) + [build_navcoin_util=$enableval], + [build_navcoin_util=$build_navcoin_utils]) AC_ARG_ENABLE([experimental-util-chainstate], [AS_HELP_STRING([--enable-experimental-util-chainstate], [build experimental navcoin-chainstate executable (default=no)])], - [build_bitcoin_chainstate=$enableval], - [build_bitcoin_chainstate=no]) + [build_navcoin_chainstate=$enableval], + [build_navcoin_chainstate=no]) AC_ARG_WITH([libs], [AS_HELP_STRING([--with-libs], [build libraries (default=yes)])], - [build_bitcoin_libs=$withval], - [build_bitcoin_libs=yes]) + [build_navcoin_libs=$withval], + [build_navcoin_libs=yes]) AC_ARG_WITH([experimental-kernel-lib], [AS_HELP_STRING([--with-experimental-kernel-lib], - [build experimental bitcoinkernel library (default is to build if we're building libraries and the experimental build-chainstate executable)])], + [build experimental navcoinkernel library (default is to build if we're building libraries and the experimental build-chainstate executable)])], [build_experimental_kernel_lib=$withval], [build_experimental_kernel_lib=auto]) AC_ARG_WITH([daemon], [AS_HELP_STRING([--with-daemon], [build navcoind daemon (default=yes)])], - [build_bitcoind=$withval], - [build_bitcoind=yes]) + [build_navcoind=$withval], + [build_navcoind=yes]) case $host in *mingw*) @@ -705,6 +686,8 @@ case $host in AC_MSG_ERROR([windres not found]) fi + CORE_CPPFLAGS="$CORE_CPPFLAGS -DSECP256K1_STATIC" + CORE_CPPFLAGS="$CORE_CPPFLAGS -D_MT -DWIN32 -D_WINDOWS -D_WIN32_WINNT=0x0601 -D_WIN32_IE=0x0501 -DWIN32_LEAN_AND_MEAN" dnl Prevent the definition of min/max macros. dnl We always want to use the standard library. 
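The compiler-flag hunks above all follow the same probe-and-append pattern: AX_CHECK_COMPILE_FLAG compiles a small test program with the candidate flag (together with the warning-to-error flag held in $CXXFLAG_WERROR, so that "unknown option" diagnostics count as failures) and only appends the flag to CORE_CXXFLAGS or WARN_CXXFLAGS when that test compile succeeds. That is why a GCC-only flag such as -fstack-reuse=none can be listed unconditionally. A rough Python sketch of the idea, assuming a C++ compiler is on PATH (`compiler_accepts` is an invented name, not part of configure):

```python
import os
import shutil
import subprocess
import tempfile


def compiler_accepts(flag: str, cxx: str = "c++") -> bool:
    """Compile an empty program with -Werror plus the candidate flag; success means usable."""
    if shutil.which(cxx) is None:
        raise RuntimeError(f"no '{cxx}' compiler found on PATH")
    with tempfile.TemporaryDirectory() as tmp:
        src = os.path.join(tmp, "conftest.cpp")
        obj = os.path.join(tmp, "conftest.o")
        with open(src, "w", encoding="utf8") as f:
            f.write("int main() { return 0; }\n")
        result = subprocess.run(
            [cxx, "-Werror", flag, "-c", src, "-o", obj],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        return result.returncode == 0


if __name__ == "__main__":
    # Per the comment above, only GCC understands -fstack-reuse=none, so under
    # Clang the probe fails (returns False) and the flag is simply not added.
    print(compiler_accepts("-fstack-reuse=none"))
    print(compiler_accepts("-Wsuggest-override"))
```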
@@ -720,6 +703,10 @@ case $host in dnl We require Windows 7 (NT 6.1) or later AX_CHECK_LINK_FLAG([-Wl,--major-subsystem-version -Wl,6 -Wl,--minor-subsystem-version -Wl,1], [CORE_LDFLAGS="$CORE_LDFLAGS -Wl,--major-subsystem-version -Wl,6 -Wl,--minor-subsystem-version -Wl,1"], [], [$LDFLAG_WERROR]) + + dnl Avoid the use of aligned vector instructions when building for Windows. + dnl See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412. + AX_CHECK_COMPILE_FLAG([-Wa,-muse-unaligned-vector-move], [CORE_CXXFLAGS="$CORE_CXXFLAGS -Wa,-muse-unaligned-vector-move"], [], [$CXXFLAG_WERROR]) ;; *darwin*) TARGET_OS=darwin @@ -740,7 +727,7 @@ case $host in dnl option to system-ify all /usr/local/include paths without adding it to the list dnl of search paths in case it's not already there. if test "$suppress_external_warnings" != "no"; then - AX_CHECK_PREPROC_FLAG([-Xclang -internal-isystem/usr/local/include], [CORE_CPPFLAGS="$CORE_CPPFLAGS -Xclang -internal-isystem/usr/local/include"], [], [$CXXFLAG_WERROR]) + AX_CHECK_PREPROC_FLAG([-Xclang -internal-isystem -Xclang /usr/local/include/], [CORE_CPPFLAGS="$CORE_CPPFLAGS -Xclang -internal-isystem -Xclang /usr/local/include/"], [], [$CXXFLAG_WERROR]) fi if test "$use_bdb" != "no" && $BREW list --versions berkeley-db@4 >/dev/null && test "$BDB_CFLAGS" = "" && test "$BDB_LIBS" = ""; then @@ -750,10 +737,6 @@ case $host in BDB_LIBS="-L$bdb_prefix/lib -ldb_cxx-4.8" fi - if test "$use_sqlite" != "no" && $BREW list --versions sqlite3 >/dev/null; then - export PKG_CONFIG_PATH="$($BREW --prefix sqlite3 2>/dev/null)/lib/pkgconfig:$PKG_CONFIG_PATH" - fi - case $host in *aarch64*) dnl The preferred Homebrew prefix for Apple Silicon is /opt/homebrew. @@ -790,7 +773,7 @@ case $host in AC_PATH_TOOL([DSYMUTIL], [dsymutil], [dsymutil]) AC_PATH_TOOL([INSTALL_NAME_TOOL], [install_name_tool], [install_name_tool]) AC_PATH_TOOL([OTOOL], [otool], [otool]) - AC_PATH_PROGS([XORRISOFS], [xorrisofs], [xorrisofs]) + AC_PATH_PROG([ZIP], [zip], [zip]) dnl libtool will try to strip the static lib, which is a problem for dnl cross-builds because strip attempts to call a hard-coded ld, @@ -869,13 +852,7 @@ if test "$use_lcov" = "yes"; then [AC_MSG_ERROR([lcov testing requested but --coverage linker flag does not work])]) AX_CHECK_COMPILE_FLAG([--coverage],[CORE_CXXFLAGS="$CORE_CXXFLAGS --coverage"], [AC_MSG_ERROR([lcov testing requested but --coverage flag does not work])]) - dnl If coverage is enabled, and the user hasn't overridden CXXFLAGS, clear - dnl them, to prevent autoconfs "-g -O2" being added. Otherwise we'd end up - dnl with "--coverage -Og -O0 -g -O2". - if test "$CXXFLAGS_overridden" = "no"; then - CXXFLAGS="" - fi - CORE_CXXFLAGS="$CORE_CXXFLAGS -Og -O0" + CORE_CXXFLAGS="$CORE_CXXFLAGS -Og" fi if test "$use_lcov_branch" != "no"; then @@ -891,7 +868,7 @@ AX_PTHREAD dnl Check if -latomic is required for CHECK_ATOMIC -dnl The following macro will add the necessary defines to bitcoin-config.h, but +dnl The following macro will add the necessary defines to navcoin-config.h, but dnl they also need to be passed down to any subprojects. Pull the results out of dnl the cache and add them to CPPFLAGS. AC_SYS_LARGEFILE @@ -910,8 +887,6 @@ if test "$ac_cv_sys_large_files" != "" && CORE_CPPFLAGS="$CORE_CPPFLAGS -D_LARGE_FILES=$ac_cv_sys_large_files" fi -AC_SEARCH_LIBS([clock_gettime],[rt]) - if test "$enable_gprof" = "yes"; then dnl -pg is incompatible with -pie. Since hardening and profiling together doesn't make sense, dnl we simply make them mutually exclusive here. 
Additionally, hardened toolchains may force @@ -934,12 +909,6 @@ if test "$TARGET_OS" != "windows"; then AX_CHECK_COMPILE_FLAG([-fPIC], [PIC_FLAGS="-fPIC"]) fi -dnl Versions of gcc prior to 12.1 (commit -dnl https://github.com/gcc-mirror/gcc/commit/551aa75778a4c5165d9533cd447c8fc822f583e1) -dnl are subject to a bug, see the gccbug_90348 test case and -dnl https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90348. To work around that, set -dnl -fstack-reuse=none for all gcc builds. (Only gcc understands this flag) -AX_CHECK_COMPILE_FLAG([-fstack-reuse=none], [HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fstack-reuse=none"]) if test "$use_hardening" != "no"; then use_hardening=yes AX_CHECK_COMPILE_FLAG([-Wstack-protector], [HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -Wstack-protector"]) @@ -949,7 +918,8 @@ if test "$use_hardening" != "no"; then case $host in *mingw*) - dnl stack-clash-protection doesn't currently work, and likely should just be skipped for Windows. + dnl stack-clash-protection doesn't compile with GCC 10 and earlier. + dnl In any case, it is a no-op for Windows. dnl See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90458 for more details. ;; *) @@ -957,6 +927,11 @@ if test "$use_hardening" != "no"; then ;; esac + case $host in + *aarch64*) + AX_CHECK_COMPILE_FLAG([-mbranch-protection=bti], [HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -mbranch-protection=bti"]) + ;; + esac dnl When enable_debug is yes, all optimizations are disabled. dnl However, FORTIFY_SOURCE requires that there is some level of optimization, otherwise it does nothing and just creates a compiler warning. @@ -978,12 +953,6 @@ if test "$use_hardening" != "no"; then AX_CHECK_LINK_FLAG([-Wl,-z,now], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-z,now"], [], [$LDFLAG_WERROR]) AX_CHECK_LINK_FLAG([-Wl,-z,separate-code], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-z,separate-code"], [], [$LDFLAG_WERROR]) AX_CHECK_LINK_FLAG([-fPIE -pie], [PIE_FLAGS="-fPIE"; HARDENED_LDFLAGS="$HARDENED_LDFLAGS -pie"], [], [$CXXFLAG_WERROR]) - - case $host in - *mingw*) - AC_CHECK_LIB([ssp], [main], [], [AC_MSG_ERROR([libssp missing])]) - ;; - esac fi dnl These flags are specific to ld64, and may cause issues with other linkers. @@ -992,7 +961,7 @@ dnl "ad_strip" as the symbol for the entry point. 
if test "$TARGET_OS" = "darwin"; then AX_CHECK_LINK_FLAG([-Wl,-dead_strip], [CORE_LDFLAGS="$CORE_LDFLAGS -Wl,-dead_strip"], [], [$LDFLAG_WERROR]) AX_CHECK_LINK_FLAG([-Wl,-dead_strip_dylibs], [CORE_LDFLAGS="$CORE_LDFLAGS -Wl,-dead_strip_dylibs"], [], [$LDFLAG_WERROR]) - AX_CHECK_LINK_FLAG([-Wl,-bind_at_load], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-bind_at_load"], [], [$LDFLAG_WERROR]) + AX_CHECK_LINK_FLAG([-Wl,-fixup_chains], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-fixup_chains"], [], [$LDFLAG_WERROR]) fi AC_CHECK_HEADERS([endian.h sys/endian.h byteswap.h sys/select.h sys/prctl.h sys/sysctl.h vm/vm_param.h sys/vmmeter.h sys/resources.h]) @@ -1057,7 +1026,7 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], dnl Check for posix_fallocate AC_MSG_CHECKING([for posix_fallocate]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ - // same as in src/util/system.cpp + // same as in src/util/fs_helpers.cpp #ifdef __linux__ #ifdef _POSIX_C_SOURCE #undef _POSIX_C_SOURCE @@ -1155,17 +1124,16 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], ) dnl Check for different ways of gathering OS randomness -AC_MSG_CHECKING([for Linux getrandom syscall]) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include - #include - #include ]], - [[ syscall(SYS_getrandom, nullptr, 32, 0); ]])], - [ AC_MSG_RESULT([yes]); AC_DEFINE([HAVE_SYS_GETRANDOM], [1], [Define this symbol if the Linux getrandom system call is available]) ], +AC_MSG_CHECKING([for Linux getrandom function]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + #include ]], + [[ getrandom(nullptr, 32, 0); ]])], + [ AC_MSG_RESULT([yes]); AC_DEFINE([HAVE_GETRANDOM], [1], [Define this symbol if the Linux getrandom function call is available]) ], [ AC_MSG_RESULT([no])] ) -AC_MSG_CHECKING([for getentropy via random.h]) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include +AC_MSG_CHECKING([for getentropy via sys/random.h]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #include ]], [[ getentropy(nullptr, 32) ]])], [ AC_MSG_RESULT([yes]); AC_DEFINE([HAVE_GETENTROPY_RAND], [1], [Define this symbol if the BSD getentropy system call is available with sys/random.h]) ], @@ -1297,14 +1265,14 @@ AC_DEFUN([SUPPRESS_WARNINGS], dnl enable-fuzz should disable all other targets if test "$enable_fuzz" = "yes"; then AC_MSG_WARN([enable-fuzz will disable all other targets and force --enable-fuzz-binary=yes]) - build_bitcoin_utils=no - build_bitcoin_cli=no - build_bitcoin_tx=no - build_bitcoin_util=no - build_bitcoin_chainstate=no - build_bitcoin_wallet=no - build_bitcoind=no - build_bitcoin_libs=no + build_navcoin_utils=no + build_navcoin_cli=no + build_navcoin_tx=no + build_navcoin_util=no + build_navcoin_chainstate=no + build_navcoin_wallet=no + build_navcoind=no + build_navcoin_libs=no use_bench=no use_tests=no use_external_signer=no @@ -1316,26 +1284,6 @@ if test "$enable_fuzz" = "yes"; then AX_CHECK_PREPROC_FLAG([-DABORT_ON_FAILED_ASSUME], [DEBUG_CPPFLAGS="$DEBUG_CPPFLAGS -DABORT_ON_FAILED_ASSUME"], [], [$CXXFLAG_WERROR]) fi -if test "$enable_build_libblsct_only" = "yes"; then - AC_MSG_WARN([enable-build-libblsct-only will disable all other targets overriding enable-fuzz]) - build_bitcoin_utils=no - build_bitcoin_cli=no - build_bitcoin_tx=no - build_bitcoin_util=no - build_bitcoin_chainstate=no - build_bitcoin_wallet=no - build_bitcoind=no - build_bitcoin_libs=no - use_bench=no - use_tests=no - use_external_signer=no - use_upnp=no - use_natpmp=no - use_zmq=no - enable_fuzz=no - enable_fuzz_binary=no -fi - if test "$enable_fuzz_binary" = "yes"; then AC_MSG_CHECKING([whether main function is needed for fuzz 
binary]) AX_CHECK_LINK_FLAG( @@ -1351,8 +1299,6 @@ if test "$enable_fuzz_binary" = "yes"; then ]],[[ */ int not_main() { ]])]) - - CHECK_RUNTIME_LIB fi if test "$enable_wallet" != "no"; then @@ -1398,7 +1344,9 @@ if test "$use_usdt" != "no"; then AC_COMPILE_IFELSE([ AC_LANG_PROGRAM( [#include ], - [DTRACE_PROBE("context", "event");] + [DTRACE_PROBE(context, event); + int a, b, c, d, e, f, g; + DTRACE_PROBE7(context, event, a, b, c, d, e, f, g);] )], [AC_MSG_RESULT([yes]); AC_DEFINE([ENABLE_TRACING], [1], [Define to 1 to enable tracepoints for Userspace, Statically Defined Tracing])], [AC_MSG_RESULT([no]); use_usdt=no;] @@ -1406,7 +1354,7 @@ if test "$use_usdt" != "no"; then fi AM_CONDITIONAL([ENABLE_USDT_TRACEPOINTS], [test "$use_usdt" = "yes"]) -if test "$build_bitcoind$use_bench$use_tests" = "nononono"; then +if test "$build_navcoind$use_bench$use_tests" = "nonono"; then use_upnp=no use_natpmp=no use_zmq=no @@ -1416,15 +1364,13 @@ dnl Check for libminiupnpc (optional) if test "$use_upnp" != "no"; then TEMP_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS $MINIUPNPC_CPPFLAGS" - AC_CHECK_HEADERS( - [miniupnpc/miniupnpc.h miniupnpc/upnpcommands.h miniupnpc/upnperrors.h], - [AC_CHECK_LIB([miniupnpc], [upnpDiscover], [MINIUPNPC_LIBS="$MINIUPNPC_LIBS -lminiupnpc"], [have_miniupnpc=no], [$MINIUPNPC_LIBS])], - [have_miniupnpc=no] - ) + AC_CHECK_HEADERS([miniupnpc/miniupnpc.h miniupnpc/upnpcommands.h miniupnpc/upnperrors.h], [], [have_miniupnpc=no]) - dnl The minimum supported miniUPnPc API version is set to 17. This excludes - dnl versions with known vulnerabilities. if test "$have_miniupnpc" != "no"; then + AC_CHECK_LIB([miniupnpc], [upnpDiscover], [MINIUPNPC_LIBS="$MINIUPNPC_LIBS -lminiupnpc"], [have_miniupnpc=no], [$MINIUPNPC_LIBS]) + + dnl The minimum supported miniUPnPc API version is set to 17. This excludes + dnl versions with known vulnerabilities. AC_MSG_CHECKING([whether miniUPnPc API version is supported]) AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[ @%:@include @@ -1449,13 +1395,16 @@ dnl Check for libnatpmp (optional). 
if test "$use_natpmp" != "no"; then TEMP_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS $NATPMP_CPPFLAGS" - AC_CHECK_HEADERS([natpmp.h], - [AC_CHECK_LIB([natpmp], [initnatpmp], [NATPMP_LIBS="$NATPMP_LIBS -lnatpmp"], [have_natpmp=no], [$NATPMP_LIBS])], - [have_natpmp=no]) + AC_CHECK_HEADERS([natpmp.h], [], [have_natpmp=no]) + + if test "$have_natpmp" != "no"; then + AC_CHECK_LIB([natpmp], [initnatpmp], [NATPMP_LIBS="$NATPMP_LIBS -lnatpmp"], [have_natpmp=no], [$NATPMP_LIBS]) + fi + CPPFLAGS="$TEMP_CPPFLAGS" fi -if test "$build_bitcoin_wallet$build_bitcoin_cli$build_bitcoin_tx$build_bitcoin_util$build_bitcoind$use_tests$use_bench$enable_fuzz_binary" = "nonononononononono"; then +if test "$build_navcoin_wallet$build_navcoin_cli$build_navcoin_tx$build_navcoin_util$build_navcoind$use_tests$use_bench$enable_fuzz_binary" = "nononononononono"; then use_boost=no else use_boost=yes @@ -1464,9 +1413,9 @@ fi if test "$use_boost" = "yes"; then dnl Check for Boost headers - AX_BOOST_BASE([1.64.0],[],[AC_MSG_ERROR([Boost is not available!])]) + AX_BOOST_BASE([1.73.0],[],[AC_MSG_ERROR([Boost is not available!])]) if test "$want_boost" = "no"; then - AC_MSG_ERROR([only libbitcoinconsensus can be built without Boost]) + AC_MSG_ERROR([only libnavcoinconsensus can be built without Boost]) fi dnl we don't use multi_index serialization @@ -1479,10 +1428,6 @@ if test "$use_boost" = "yes"; then AX_CHECK_PREPROC_FLAG([-DBOOST_NO_CXX98_FUNCTION_BASE], [BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_NO_CXX98_FUNCTION_BASE"], [], [$CXXFLAG_WERROR], [AC_LANG_PROGRAM([[#include ]])]) - if test "$enable_debug" = "yes" || test "$enable_fuzz" = "yes"; then - BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE" - fi - if test "$suppress_external_warnings" != "no"; then BOOST_CPPFLAGS=SUPPRESS_WARNINGS($BOOST_CPPFLAGS) fi @@ -1519,9 +1464,19 @@ if test "$use_external_signer" != "no"; then CXXFLAGS="$TEMP_CXXFLAGS" AC_MSG_RESULT([$have_boost_process]) if test "$have_boost_process" = "yes"; then - use_external_signer="yes" - AC_DEFINE([ENABLE_EXTERNAL_SIGNER], [1], [Define if external signer support is enabled]) - AC_DEFINE([BOOST_PROCESS_USE_STD_FS], [1], [Defined to avoid Boost::Process trying to use Boost Filesystem]) + case $host in + dnl Boost Process for Windows uses Boost ASIO. Boost ASIO performs + dnl pre-main init of Windows networking libraries, which we do not + dnl want. + *mingw*) + use_external_signer="no" + ;; + *) + use_external_signer="yes" + AC_DEFINE([ENABLE_EXTERNAL_SIGNER], [1], [Define if external signer support is enabled]) + AC_DEFINE([BOOST_PROCESS_USE_STD_FS], [1], [Defined to avoid Boost::Process trying to use Boost Filesystem]) + ;; + esac else if test "$use_external_signer" = "yes"; then AC_MSG_ERROR([External signing is not supported for this Boost version]) @@ -1531,36 +1486,6 @@ if test "$use_external_signer" != "no"; then fi AM_CONDITIONAL([ENABLE_EXTERNAL_SIGNER], [test "$use_external_signer" = "yes"]) -dnl Do not compile with syscall sandbox support when compiling under the sanitizers. -dnl The sanitizers introduce use of syscalls that are not typically used in navcoind -dnl (such as execve when the sanitizers execute llvm-symbolizer). 
-if test "$use_sanitizers" != ""; then - AC_MSG_WARN([Specifying --with-sanitizers forces --without-seccomp since the sanitizers introduce use of syscalls not allowed by the navcoind syscall sandbox (-sandbox=).]) - seccomp_found=no -fi -if test "$seccomp_found" != "no"; then - AC_MSG_CHECKING([for seccomp-bpf (Linux x86-64)]) - AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[ - @%:@include - ]], [[ - #if !defined(__x86_64__) - # error Syscall sandbox is an experimental feature currently available only under Linux x86-64. - #endif - ]])],[ - AC_MSG_RESULT([yes]) - seccomp_found="yes" - AC_DEFINE([USE_SYSCALL_SANDBOX], [1], [Define this symbol to build with syscall sandbox support.]) - ],[ - AC_MSG_RESULT([no]) - seccomp_found="no" - ]) -fi -dnl Currently only enable -sandbox= feature if seccomp is found. -dnl In the future, sandboxing could be also be supported with other -dnl sandboxing mechanisms besides seccomp. -use_syscall_sandbox=$seccomp_found -AM_CONDITIONAL([ENABLE_SYSCALL_SANDBOX], [test "$use_syscall_sandbox" != "no"]) - dnl Check for reduced exports if test "$use_reduce_exports" = "yes"; then AX_CHECK_COMPILE_FLAG([-fvisibility=hidden], [CORE_CXXFLAGS="$CORE_CXXFLAGS -fvisibility=hidden"], @@ -1578,7 +1503,7 @@ fi dnl libevent check use_libevent=no -if test "$build_bitcoin_cli$build_bitcoind$enable_fuzz_binary$use_tests$use_bench" != "nonononono"; then +if test "$build_navcoin_cli$build_navcoind$enable_fuzz_binary$use_tests$use_bench" != "nonononono"; then PKG_CHECK_MODULES([EVENT], [libevent >= 2.1.8], [use_libevent=yes], [AC_MSG_ERROR([libevent version 2.1.8 or greater not found.])]) if test "$TARGET_OS" != "windows"; then PKG_CHECK_MODULES([EVENT_PTHREADS], [libevent_pthreads >= 2.1.8], [], [AC_MSG_ERROR([libevent_pthreads version 2.1.8 or greater not found.])]) @@ -1613,12 +1538,9 @@ dnl ZMQ check if test "$use_zmq" = "yes"; then PKG_CHECK_MODULES([ZMQ], [libzmq >= 4], - AC_DEFINE([ENABLE_ZMQ], [1], [Define to 1 to enable ZMQ functions]), - [AC_DEFINE([ENABLE_ZMQ], [0], [Define to 1 to enable ZMQ functions]) - AC_MSG_WARN([libzmq version 4.x or greater not found, disabling]) + AC_DEFINE([ENABLE_ZMQ], [1], [Define this symbol to enable ZMQ functions]), + [AC_MSG_WARN([libzmq version 4.x or greater not found, disabling]) use_zmq=no]) -else - AC_DEFINE_UNQUOTED([ENABLE_ZMQ], [0], [Define to 1 to enable ZMQ functions]) fi if test "$use_zmq" = "yes"; then @@ -1630,6 +1552,8 @@ if test "$use_zmq" = "yes"; then esac fi +AM_CONDITIONAL([ENABLE_ZMQ], [test "$use_zmq" = "yes"]) + dnl libmultiprocess library check libmultiprocess_found=no @@ -1656,7 +1580,7 @@ else fi AM_CONDITIONAL([BUILD_MULTIPROCESS], [test "$build_multiprocess" = "yes"]) -AM_CONDITIONAL([BUILD_BITCOIN_NODE], [test "$build_multiprocess" = "yes"]) +AM_CONDITIONAL([BUILD_NAVCOIN_NODE], [test "$build_multiprocess" = "yes"]) dnl codegen tools check @@ -1669,46 +1593,46 @@ if test "$build_multiprocess" != "no"; then AC_SUBST(MPGEN_PREFIX) fi -AC_MSG_CHECKING([whether to build bitcoind]) -AM_CONDITIONAL([BUILD_BITCOIND], [test $build_bitcoind = "yes"]) -AC_MSG_RESULT($build_bitcoind) +AC_MSG_CHECKING([whether to build navcoind]) +AM_CONDITIONAL([BUILD_NAVCOIND], [test $build_navcoind = "yes"]) +AC_MSG_RESULT($build_navcoind) -AC_MSG_CHECKING([whether to build bitcoin-cli]) -AM_CONDITIONAL([BUILD_BITCOIN_CLI], [test $build_bitcoin_cli = "yes"]) -AC_MSG_RESULT($build_bitcoin_cli) +AC_MSG_CHECKING([whether to build navcoin-cli]) +AM_CONDITIONAL([BUILD_NAVCOIN_CLI], [test $build_navcoin_cli = "yes"]) +AC_MSG_RESULT($build_navcoin_cli) 
-AC_MSG_CHECKING([whether to build bitcoin-tx]) -AM_CONDITIONAL([BUILD_BITCOIN_TX], [test $build_bitcoin_tx = "yes"]) -AC_MSG_RESULT($build_bitcoin_tx) +AC_MSG_CHECKING([whether to build navcoin-tx]) +AM_CONDITIONAL([BUILD_NAVCOIN_TX], [test $build_navcoin_tx = "yes"]) +AC_MSG_RESULT($build_navcoin_tx) -AC_MSG_CHECKING([whether to build bitcoin-wallet]) -AM_CONDITIONAL([BUILD_BITCOIN_WALLET], [test $build_bitcoin_wallet = "yes"]) -AC_MSG_RESULT($build_bitcoin_wallet) +AC_MSG_CHECKING([whether to build navcoin-wallet]) +AM_CONDITIONAL([BUILD_NAVCOIN_WALLET], [test $build_navcoin_wallet = "yes"]) +AC_MSG_RESULT($build_navcoin_wallet) -AC_MSG_CHECKING([whether to build bitcoin-util]) -AM_CONDITIONAL([BUILD_BITCOIN_UTIL], [test $build_bitcoin_util = "yes"]) -AC_MSG_RESULT($build_bitcoin_util) +AC_MSG_CHECKING([whether to build navcoin-util]) +AM_CONDITIONAL([BUILD_NAVCOIN_UTIL], [test $build_navcoin_util = "yes"]) +AC_MSG_RESULT($build_navcoin_util) -AC_MSG_CHECKING([whether to build experimental bitcoin-chainstate]) -if test "$build_bitcoin_chainstate" = "yes"; then +AC_MSG_CHECKING([whether to build experimental navcoin-chainstate]) +if test "$build_navcoin_chainstate" = "yes"; then if test "$build_experimental_kernel_lib" = "no"; then - AC_MSG_ERROR([experimental bitcoin-chainstate cannot be built without the experimental bitcoinkernel library. Use --with-experimental-kernel-lib]); + AC_MSG_ERROR([experimental navcoin-chainstate cannot be built without the experimental navcoinkernel library. Use --with-experimental-kernel-lib]); fi fi -AM_CONDITIONAL([BUILD_BITCOIN_CHAINSTATE], [test $build_bitcoin_chainstate = "yes"]) -AC_MSG_RESULT($build_bitcoin_chainstate) +AM_CONDITIONAL([BUILD_NAVCOIN_CHAINSTATE], [test $build_navcoin_chainstate = "yes"]) +AC_MSG_RESULT($build_navcoin_chainstate) AC_MSG_CHECKING([whether to build libraries]) -AM_CONDITIONAL([BUILD_BITCOIN_LIBS], [test $build_bitcoin_libs = "yes"]) +AM_CONDITIONAL([BUILD_NAVCOIN_LIBS], [test $build_navcoin_libs = "yes"]) -if test "$build_bitcoin_libs" = "yes"; then +if test "$build_navcoin_libs" = "yes"; then AC_DEFINE([HAVE_CONSENSUS_LIB], [1], [Define this symbol if the consensus lib has been built]) - AC_CONFIG_FILES([libbitcoinconsensus.pc:libbitcoinconsensus.pc.in]) + AC_CONFIG_FILES([libnavcoinconsensus.pc:libnavcoinconsensus.pc.in]) fi -AM_CONDITIONAL([BUILD_BITCOIN_KERNEL_LIB], [test "$build_experimental_kernel_lib" != "no" && ( test "$build_experimental_kernel_lib" = "yes" || test "$build_bitcoin_chainstate" = "yes" )]) +AM_CONDITIONAL([BUILD_NAVCOIN_KERNEL_LIB], [test "$build_experimental_kernel_lib" != "no" && ( test "$build_experimental_kernel_lib" = "yes" || test "$build_navcoin_chainstate" = "yes" )]) -AC_MSG_RESULT($build_bitcoin_libs) +AC_MSG_RESULT($build_navcoin_libs) AC_LANG_POP @@ -1785,8 +1709,6 @@ else fi fi -AM_CONDITIONAL([ENABLE_ZMQ], [test "$use_zmq" = "yes"]) - AC_MSG_CHECKING([whether to build test_navcoin]) if test "$use_tests" = "yes"; then if test "$enable_fuzz" = "yes"; then @@ -1807,7 +1729,7 @@ else AC_MSG_RESULT([no]) fi -if test "$build_bitcoin_wallet$build_bitcoin_cli$build_bitcoin_tx$build_bitcoin_util$build_bitcoin_libs$build_bitcoind$enable_fuzz_binary$use_bench$use_tests" = "nononononononononono"; then +if test "$build_navcoin_wallet$build_navcoin_cli$build_navcoin_tx$build_navcoin_util$build_navcoin_libs$build_navcoind$enable_fuzz_binary$use_bench$use_tests" = "nonononononononono"; then AC_MSG_ERROR([No targets! 
Please specify at least one of: --with-utils --with-libs --with-daemon --enable-fuzz(-binary) --enable-bench or --enable-tests]) fi @@ -1858,14 +1780,14 @@ AC_SUBST(COPYRIGHT_YEAR, _COPYRIGHT_YEAR) AC_SUBST(COPYRIGHT_HOLDERS, "_COPYRIGHT_HOLDERS") AC_SUBST(COPYRIGHT_HOLDERS_SUBSTITUTION, "_COPYRIGHT_HOLDERS_SUBSTITUTION") AC_SUBST(COPYRIGHT_HOLDERS_FINAL, "_COPYRIGHT_HOLDERS_FINAL") -AC_SUBST(BITCOIN_DAEMON_NAME) -AC_SUBST(BITCOIN_TEST_NAME) -AC_SUBST(BITCOIN_CLI_NAME) -AC_SUBST(BITCOIN_TX_NAME) -AC_SUBST(BITCOIN_UTIL_NAME) -AC_SUBST(BITCOIN_CHAINSTATE_NAME) -AC_SUBST(BITCOIN_WALLET_TOOL_NAME) -AC_SUBST(BITCOIN_MP_NODE_NAME) +AC_SUBST(NAVCOIN_DAEMON_NAME) +AC_SUBST(NAVCOIN_TEST_NAME) +AC_SUBST(NAVCOIN_CLI_NAME) +AC_SUBST(NAVCOIN_TX_NAME) +AC_SUBST(NAVCOIN_UTIL_NAME) +AC_SUBST(NAVCOIN_CHAINSTATE_NAME) +AC_SUBST(NAVCOIN_WALLET_TOOL_NAME) +AC_SUBST(NAVCOIN_MP_NODE_NAME) AC_SUBST(RELDFLAGS) AC_SUBST(CORE_LDFLAGS) @@ -1881,8 +1803,6 @@ AC_SUBST(GPROF_LDFLAGS) AC_SUBST(HARDENED_CXXFLAGS) AC_SUBST(HARDENED_CPPFLAGS) AC_SUBST(HARDENED_LDFLAGS) -AC_SUBST(LTO_CXXFLAGS) -AC_SUBST(LTO_LDFLAGS) AC_SUBST(PIC_FLAGS) AC_SUBST(PIE_FLAGS) AC_SUBST(SANITIZER_CXXFLAGS) @@ -1933,6 +1853,9 @@ CPPFLAGS_TEMP="$CPPFLAGS" unset CPPFLAGS CPPFLAGS="$CPPFLAGS_TEMP" +if test -n "$use_sanitizers"; then + export SECP_CFLAGS="$SECP_CFLAGS $SANITIZER_CFLAGS" +fi ac_configure_args="${ac_configure_args} --disable-shared --with-pic --enable-benchmark=no --enable-module-recovery --disable-module-ecdh" AC_CONFIG_SUBDIRS([src/secp256k1]) @@ -1946,12 +1869,22 @@ case ${OS} in ;; esac +dnl An old hack similar to a98356fee to remove hard-coded +dnl bind_at_load flag from libtool +case $host in + *darwin*) + AC_MSG_RESULT([Removing -Wl,bind_at_load from libtool.]) + sed < libtool > libtool-2 '/bind_at_load/d' + mv libtool-2 libtool + chmod 755 libtool + ;; +esac + echo echo "Options used to compile and link:" echo " external signer = $use_external_signer" echo " multiprocess = $build_multiprocess" -echo " with experimental syscall sandbox support = $use_syscall_sandbox" -echo " with libs = $build_bitcoin_libs" +echo " with libs = $build_navcoin_libs" echo " with wallet = $enable_wallet" if test "$enable_wallet" != "no"; then echo " with sqlite = $use_sqlite" @@ -1973,18 +1906,16 @@ echo " sanitizers = $use_sanitizers" echo " debug enabled = $enable_debug" echo " gprof enabled = $enable_gprof" echo " werror = $enable_werror" -echo " LTO = $enable_lto" -echo " libblsct only = $enable_build_libblsct_only" echo echo " target os = $host_os" echo " build os = $build_os" echo echo " CC = $CC" -echo " CFLAGS = $PTHREAD_CFLAGS $CFLAGS" +echo " CFLAGS = $PTHREAD_CFLAGS $SANITIZER_CFLAGS $CFLAGS" echo " CPPFLAGS = $DEBUG_CPPFLAGS $HARDENED_CPPFLAGS $CORE_CPPFLAGS $CPPFLAGS" echo " CXX = $CXX" -echo " CXXFLAGS = $LTO_CXXFLAGS $DEBUG_CXXFLAGS $HARDENED_CXXFLAGS $WARN_CXXFLAGS $NOWARN_CXXFLAGS $ERROR_CXXFLAGS $GPROF_CXXFLAGS $CORE_CXXFLAGS $CXXFLAGS" -echo " LDFLAGS = $LTO_LDFLAGS $PTHREAD_LIBS $HARDENED_LDFLAGS $GPROF_LDFLAGS $CORE_LDFLAGS $LDFLAGS" +echo " CXXFLAGS = $CORE_CXXFLAGS $DEBUG_CXXFLAGS $HARDENED_CXXFLAGS $WARN_CXXFLAGS $NOWARN_CXXFLAGS $ERROR_CXXFLAGS $GPROF_CXXFLAGS $SANITIZER_CXXFLAGS $CXXFLAGS" +echo " LDFLAGS = $PTHREAD_LIBS $HARDENED_LDFLAGS $GPROF_LDFLAGS $SANITIZER_LDFLAGS $CORE_LDFLAGS $LDFLAGS" echo " AR = $AR" echo " ARFLAGS = $ARFLAGS" echo diff --git a/contrib/README.md b/contrib/README.md index 3c6e978061169..f375993ac4b76 100644 --- a/contrib/README.md +++ b/contrib/README.md @@ -35,7 +35,7 @@ Test and Verify Tools ### 
[TestGen](/contrib/testgen) ### Utilities to generate test vectors for the data-driven Bitcoin tests. -### [Verify Binaries](/contrib/verifybinaries) ### +### [Verify-Binaries](/contrib/verify-binaries) ### This script attempts to download and verify the signature file SHA256SUMS.asc from bitcoin.org. Command Line Tools diff --git a/contrib/completions/bash/bitcoin-cli.bash-completion b/contrib/completions/bash/bitcoin-cli.bash similarity index 100% rename from contrib/completions/bash/bitcoin-cli.bash-completion rename to contrib/completions/bash/bitcoin-cli.bash diff --git a/contrib/completions/bash/bitcoin-tx.bash-completion b/contrib/completions/bash/bitcoin-tx.bash similarity index 100% rename from contrib/completions/bash/bitcoin-tx.bash-completion rename to contrib/completions/bash/bitcoin-tx.bash diff --git a/contrib/completions/bash/bitcoind.bash-completion b/contrib/completions/bash/bitcoind.bash similarity index 100% rename from contrib/completions/bash/bitcoind.bash-completion rename to contrib/completions/bash/bitcoind.bash diff --git a/contrib/debian/copyright b/contrib/debian/copyright index ca430170a1d6e..dafc92f8ad72d 100644 --- a/contrib/debian/copyright +++ b/contrib/debian/copyright @@ -5,7 +5,7 @@ Upstream-Contact: Satoshi Nakamoto Source: https://github.com/bitcoin/bitcoin Files: * -Copyright: 2009-2023, Bitcoin Core Developers +Copyright: 2009-2024, Bitcoin Core Developers License: Expat Comment: The Bitcoin Core Developers encompasses all contributors to the project, listed in the release notes or the git log. diff --git a/contrib/devtools/README.md b/contrib/devtools/README.md index 76aae5c3f5905..31256fe4eea44 100644 --- a/contrib/devtools/README.md +++ b/contrib/devtools/README.md @@ -83,13 +83,23 @@ A small script to automatically create manpages in ../../doc/man by running the This requires help2man which can be found at: https://www.gnu.org/software/help2man/ With in-tree builds this tool can be run from any directory within the -repostitory. To use this tool with out-of-tree builds set `BUILDDIR`. For +repository. To use this tool with out-of-tree builds set `BUILDDIR`. For example: ```bash BUILDDIR=$PWD/build contrib/devtools/gen-manpages.py ``` +headerssync-params.py +===================== + +A script to generate optimal parameters for the headerssync module (src/headerssync.cpp). It takes no command-line +options, as all its configuration is set at the top of the file. It runs many times faster inside PyPy. 
Invocation: + +```bash +pypy3 contrib/devtools/headerssync-params.py +``` + gen-bitcoin-conf.sh =================== diff --git a/contrib/devtools/bitcoin-tidy/CMakeLists.txt b/contrib/devtools/bitcoin-tidy/CMakeLists.txt new file mode 100644 index 0000000000000..35e60d1d87e2b --- /dev/null +++ b/contrib/devtools/bitcoin-tidy/CMakeLists.txt @@ -0,0 +1,56 @@ +cmake_minimum_required(VERSION 3.9) + +project(bitcoin-tidy VERSION 1.0.0 DESCRIPTION "clang-tidy checks for Bitcoin Core") + +include(GNUInstallDirs) + +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED True) +set(CMAKE_CXX_EXTENSIONS False) + +# TODO: Figure out how to avoid the terminfo check +find_package(LLVM REQUIRED CONFIG) +find_program(CLANG_TIDY_EXE NAMES "clang-tidy-${LLVM_VERSION_MAJOR}" "clang-tidy" HINTS ${LLVM_TOOLS_BINARY_DIR}) +message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") +message(STATUS "Found clang-tidy: ${CLANG_TIDY_EXE}") + +add_library(bitcoin-tidy MODULE bitcoin-tidy.cpp logprintf.cpp) +target_include_directories(bitcoin-tidy SYSTEM PRIVATE ${LLVM_INCLUDE_DIRS}) + +# Disable RTTI and exceptions as necessary +if (MSVC) + target_compile_options(bitcoin-tidy PRIVATE /GR-) +else() + target_compile_options(bitcoin-tidy PRIVATE -fno-rtti) + target_compile_options(bitcoin-tidy PRIVATE -fno-exceptions) +endif() + +if(CMAKE_HOST_APPLE) + # ld64 expects no undefined symbols by default + target_link_options(bitcoin-tidy PRIVATE -Wl,-flat_namespace) + target_link_options(bitcoin-tidy PRIVATE -Wl,-undefined -Wl,suppress) +endif() + +# Add warnings +if (MSVC) + target_compile_options(bitcoin-tidy PRIVATE /W4) +else() + target_compile_options(bitcoin-tidy PRIVATE -Wall) + target_compile_options(bitcoin-tidy PRIVATE -Wextra) +endif() + +if(CMAKE_VERSION VERSION_LESS 3.27) + set(CLANG_TIDY_COMMAND "${CLANG_TIDY_EXE}" "--load=${CMAKE_BINARY_DIR}/${CMAKE_SHARED_MODULE_PREFIX}bitcoin-tidy${CMAKE_SHARED_MODULE_SUFFIX}" "-checks=-*,bitcoin-*") +else() + # CLANG_TIDY_COMMAND supports generator expressions as of 3.27 + set(CLANG_TIDY_COMMAND "${CLANG_TIDY_EXE}" "--load=$" "-checks=-*,bitcoin-*") +endif() + +# Create a dummy library that runs clang-tidy tests as a side-effect of building +add_library(bitcoin-tidy-tests OBJECT EXCLUDE_FROM_ALL example_logprintf.cpp) +add_dependencies(bitcoin-tidy-tests bitcoin-tidy) + +set_target_properties(bitcoin-tidy-tests PROPERTIES CXX_CLANG_TIDY "${CLANG_TIDY_COMMAND}") + + +install(TARGETS bitcoin-tidy LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}) diff --git a/contrib/devtools/bitcoin-tidy/README b/contrib/devtools/bitcoin-tidy/README new file mode 100644 index 0000000000000..c15e07c4ede54 --- /dev/null +++ b/contrib/devtools/bitcoin-tidy/README @@ -0,0 +1,11 @@ +# Bitcoin Tidy + +Example Usage: + +```bash +cmake -S . -B build -DLLVM_DIR=$(llvm-config --cmakedir) -DCMAKE_BUILD_TYPE=Release + +cmake --build build -j$(nproc) + +cmake --build build --target bitcoin-tidy-tests -j$(nproc) +``` diff --git a/contrib/devtools/bitcoin-tidy/bitcoin-tidy.cpp b/contrib/devtools/bitcoin-tidy/bitcoin-tidy.cpp new file mode 100644 index 0000000000000..0f34d37793877 --- /dev/null +++ b/contrib/devtools/bitcoin-tidy/bitcoin-tidy.cpp @@ -0,0 +1,22 @@ +// Copyright (c) 2023 Bitcoin Developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include "logprintf.h" + +#include +#include + +class BitcoinModule final : public clang::tidy::ClangTidyModule +{ +public: + void addCheckFactories(clang::tidy::ClangTidyCheckFactories& CheckFactories) override + { + CheckFactories.registerCheck("bitcoin-unterminated-logprintf"); + } +}; + +static clang::tidy::ClangTidyModuleRegistry::Add + X("bitcoin-module", "Adds bitcoin checks."); + +volatile int BitcoinModuleAnchorSource = 0; diff --git a/contrib/devtools/bitcoin-tidy/example_logprintf.cpp b/contrib/devtools/bitcoin-tidy/example_logprintf.cpp new file mode 100644 index 0000000000000..a12a666c086d6 --- /dev/null +++ b/contrib/devtools/bitcoin-tidy/example_logprintf.cpp @@ -0,0 +1,108 @@ +// Copyright (c) 2023 Bitcoin Developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include + +// Test for bitcoin-unterminated-logprintf + +enum LogFlags { + NONE +}; + +enum Level { + None +}; + +template +static inline void LogPrintf_(const std::string& logging_function, const std::string& source_file, const int source_line, const LogFlags flag, const Level level, const char* fmt, const Args&... args) +{ +} + +#define LogPrintLevel_(category, level, ...) LogPrintf_(__func__, __FILE__, __LINE__, category, level, __VA_ARGS__) +#define LogPrintf(...) LogPrintLevel_(LogFlags::NONE, Level::None, __VA_ARGS__) + +#define LogPrint(category, ...) \ + do { \ + LogPrintf(__VA_ARGS__); \ + } while (0) + + +class CWallet +{ + std::string GetDisplayName() const + { + return "default wallet"; + } + +public: + template + void WalletLogPrintf(const char* fmt, Params... parameters) const + { + LogPrintf(("%s " + std::string{fmt}).c_str(), GetDisplayName(), parameters...); + }; +}; + +struct ScriptPubKeyMan +{ + std::string GetDisplayName() const + { + return "default wallet"; + } + + template + void WalletLogPrintf(const char* fmt, Params... parameters) const + { + LogPrintf(("%s " + std::string{fmt}).c_str(), GetDisplayName(), parameters...); + }; +}; + +void good_func() +{ + LogPrintf("hello world!\n"); +} +void good_func2() +{ + CWallet wallet; + wallet.WalletLogPrintf("hi\n"); + ScriptPubKeyMan spkm; + spkm.WalletLogPrintf("hi\n"); + + const CWallet& walletref = wallet; + walletref.WalletLogPrintf("hi\n"); + + auto* walletptr = new CWallet(); + walletptr->WalletLogPrintf("hi\n"); + delete walletptr; +} +void bad_func() +{ + LogPrintf("hello world!"); +} +void bad_func2() +{ + LogPrintf(""); +} +void bad_func3() +{ + // Ending in "..." has no special meaning. + LogPrintf("hello world!..."); +} +void bad_func4_ignored() +{ + LogPrintf("hello world!"); // NOLINT(bitcoin-unterminated-logprintf) +} +void bad_func5() +{ + CWallet wallet; + wallet.WalletLogPrintf("hi"); + ScriptPubKeyMan spkm; + spkm.WalletLogPrintf("hi"); + + const CWallet& walletref = wallet; + walletref.WalletLogPrintf("hi"); + + auto* walletptr = new CWallet(); + walletptr->WalletLogPrintf("hi"); + delete walletptr; +} diff --git a/contrib/devtools/bitcoin-tidy/logprintf.cpp b/contrib/devtools/bitcoin-tidy/logprintf.cpp new file mode 100644 index 0000000000000..36beac28c86c3 --- /dev/null +++ b/contrib/devtools/bitcoin-tidy/logprintf.cpp @@ -0,0 +1,60 @@ +// Copyright (c) 2023 Bitcoin Developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include "logprintf.h" + +#include +#include + + +namespace { +AST_MATCHER(clang::StringLiteral, unterminated) +{ + size_t len = Node.getLength(); + if (len > 0 && Node.getCodeUnit(len - 1) == '\n') { + return false; + } + return true; +} +} // namespace + +namespace bitcoin { + +void LogPrintfCheck::registerMatchers(clang::ast_matchers::MatchFinder* finder) +{ + using namespace clang::ast_matchers; + + /* + Logprintf(..., ..., ..., ..., ..., "foo", ...) + */ + + finder->addMatcher( + callExpr( + callee(functionDecl(hasName("LogPrintf_"))), + hasArgument(5, stringLiteral(unterminated()).bind("logstring"))), + this); + + /* + auto walletptr = &wallet; + wallet.WalletLogPrintf("foo"); + wallet->WalletLogPrintf("foo"); + */ + finder->addMatcher( + cxxMemberCallExpr( + callee(cxxMethodDecl(hasName("WalletLogPrintf"))), + hasArgument(0, stringLiteral(unterminated()).bind("logstring"))), + this); +} + +void LogPrintfCheck::check(const clang::ast_matchers::MatchFinder::MatchResult& Result) +{ + if (const clang::StringLiteral* lit = Result.Nodes.getNodeAs("logstring")) { + const clang::ASTContext& ctx = *Result.Context; + const auto user_diag = diag(lit->getEndLoc(), "Unterminated format string used with LogPrintf"); + const auto& loc = lit->getLocationOfByte(lit->getByteLength(), *Result.SourceManager, ctx.getLangOpts(), ctx.getTargetInfo()); + user_diag << clang::FixItHint::CreateInsertion(loc, "\\n"); + } +} + +} // namespace bitcoin diff --git a/contrib/devtools/bitcoin-tidy/logprintf.h b/contrib/devtools/bitcoin-tidy/logprintf.h new file mode 100644 index 0000000000000..db95dfe143e84 --- /dev/null +++ b/contrib/devtools/bitcoin-tidy/logprintf.h @@ -0,0 +1,29 @@ +// Copyright (c) 2023 Bitcoin Developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef LOGPRINTF_CHECK_H +#define LOGPRINTF_CHECK_H + +#include + +namespace bitcoin { + +// Warn about any use of LogPrintf that does not end with a newline. 
+class LogPrintfCheck final : public clang::tidy::ClangTidyCheck +{ +public: + LogPrintfCheck(clang::StringRef Name, clang::tidy::ClangTidyContext* Context) + : clang::tidy::ClangTidyCheck(Name, Context) {} + + bool isLanguageVersionSupported(const clang::LangOptions& LangOpts) const override + { + return LangOpts.CPlusPlus; + } + void registerMatchers(clang::ast_matchers::MatchFinder* Finder) override; + void check(const clang::ast_matchers::MatchFinder::MatchResult& Result) override; +}; + +} // namespace bitcoin + +#endif // LOGPRINTF_CHECK_H diff --git a/contrib/devtools/circular-dependencies.py b/contrib/devtools/circular-dependencies.py index b1d9f2b7db23b..b742a8cea6718 100755 --- a/contrib/devtools/circular-dependencies.py +++ b/contrib/devtools/circular-dependencies.py @@ -5,7 +5,6 @@ import sys import re -from typing import Dict, List, Set MAPPING = { 'core_read.cpp': 'core_io.cpp', @@ -33,7 +32,7 @@ def module_name(path): return None files = dict() -deps: Dict[str, Set[str]] = dict() +deps: dict[str, set[str]] = dict() RE = re.compile("^#include <(.*)>") @@ -65,7 +64,7 @@ def module_name(path): shortest_cycle = None for module in sorted(deps.keys()): # Build the transitive closure of dependencies of module - closure: Dict[str, List[str]] = dict() + closure: dict[str, list[str]] = dict() for dep in deps[module]: closure[dep] = [] while True: diff --git a/contrib/devtools/clang-format-diff.py b/contrib/devtools/clang-format-diff.py index 420bf7ff33002..e2b661d65d235 100755 --- a/contrib/devtools/clang-format-diff.py +++ b/contrib/devtools/clang-format-diff.py @@ -1,166 +1,190 @@ #!/usr/bin/env python3 # -#===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- python -*--===# +# ===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- python -*--===# # -# The LLVM Compiler Infrastructure +# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # -# This file is distributed under the University of Illinois Open Source -# License. -# -# ============================================================ -# -# University of Illinois/NCSA -# Open Source License -# -# Copyright (c) 2007-2015 University of Illinois at Urbana-Champaign. -# All rights reserved. -# -# Developed by: -# -# LLVM Team -# -# University of Illinois at Urbana-Champaign -# -# http://llvm.org -# -# Permission is hereby granted, free of charge, to any person obtaining a copy of -# this software and associated documentation files (the "Software"), to deal with -# the Software without restriction, including without limitation the rights to -# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -# of the Software, and to permit persons to whom the Software is furnished to do -# so, subject to the following conditions: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimers. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimers in the -# documentation and/or other materials provided with the distribution. -# -# * Neither the names of the LLVM Team, University of Illinois at -# Urbana-Champaign, nor the names of its contributors may be used to -# endorse or promote products derived from this Software without specific -# prior written permission. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE -# SOFTWARE. -# -# ============================================================ -# -#===------------------------------------------------------------------------===# - -r""" -ClangFormat Diff Reformatter -============================ +# ===------------------------------------------------------------------------===# +""" This script reads input from a unified diff and reformats all the changed lines. This is useful to reformat all the lines touched by a specific patch. Example usage for git/svn users: - git diff -U0 HEAD^ | clang-format-diff.py -p1 -i - svn diff --diff-cmd=diff -x-U0 | clang-format-diff.py -i + git diff -U0 --no-color --relative HEAD^ | {clang_format_diff} -p1 -i + svn diff --diff-cmd=diff -x-U0 | {clang_format_diff} -i +It should be noted that the filename contained in the diff is used unmodified +to determine the source file to update. Users calling this script directly +should be careful to ensure that the path in the diff is correct relative to the +current working directory. """ +from __future__ import absolute_import, division, print_function import argparse import difflib -import io import re import subprocess import sys - -# Change this to the full path if clang-format is not on the path. -binary = 'clang-format' +from io import StringIO def main(): - parser = argparse.ArgumentParser(description= - 'Reformat changed lines in diff. Without -i ' - 'option just output the diff that would be ' - 'introduced.') - parser.add_argument('-i', action='store_true', default=False, - help='apply edits to files instead of displaying a diff') - parser.add_argument('-p', metavar='NUM', default=0, - help='strip the smallest prefix containing P slashes') - parser.add_argument('-regex', metavar='PATTERN', default=None, - help='custom pattern selecting file paths to reformat ' - '(case sensitive, overrides -iregex)') - parser.add_argument('-iregex', metavar='PATTERN', default= - r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc|js|ts|proto' - r'|protodevel|java)', - help='custom pattern selecting file paths to reformat ' - '(case insensitive, overridden by -regex)') - parser.add_argument('-sort-includes', action='store_true', default=False, - help='let clang-format sort include blocks') - parser.add_argument('-v', '--verbose', action='store_true', - help='be more verbose, ineffective without -i') - args = parser.parse_args() - - # Extract changed lines for each file. 
- filename = None - lines_by_file = {} - for line in sys.stdin: - match = re.search(r'^\+\+\+\ (.*?/){%s}(\S*)' % args.p, line) - if match: - filename = match.group(2) - if filename is None: - continue - - if args.regex is not None: - if not re.match('^%s$' % args.regex, filename): - continue - else: - if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE): - continue - - match = re.search(r'^@@.*\+(\d+)(,(\d+))?', line) - if match: - start_line = int(match.group(1)) - line_count = 1 - if match.group(3): - line_count = int(match.group(3)) - if line_count == 0: - continue - end_line = start_line + line_count - 1 - lines_by_file.setdefault(filename, []).extend( - ['-lines', str(start_line) + ':' + str(end_line)]) - - # Reformat files containing changes in place. - for filename, lines in lines_by_file.items(): - if args.i and args.verbose: - print('Formatting {}'.format(filename)) - command = [binary, filename] - if args.i: - command.append('-i') - if args.sort_includes: - command.append('-sort-includes') - command.extend(lines) - command.extend(['-style=file', '-fallback-style=none']) - p = subprocess.Popen(command, - stdout=subprocess.PIPE, - stderr=None, - stdin=subprocess.PIPE, - text=True) - stdout, stderr = p.communicate() - if p.returncode != 0: - sys.exit(p.returncode) - - if not args.i: - with open(filename, encoding="utf8") as f: - code = f.readlines() - formatted_code = io.StringIO(stdout).readlines() - diff = difflib.unified_diff(code, formatted_code, - filename, filename, - '(before formatting)', '(after formatting)') - diff_string = ''.join(diff) - if len(diff_string) > 0: - sys.stdout.write(diff_string) - -if __name__ == '__main__': - main() + parser = argparse.ArgumentParser( + description=__doc__.format(clang_format_diff="%(prog)s"), + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "-i", + action="store_true", + default=False, + help="apply edits to files instead of displaying a diff", + ) + parser.add_argument( + "-p", + metavar="NUM", + default=0, + help="strip the smallest prefix containing P slashes", + ) + parser.add_argument( + "-regex", + metavar="PATTERN", + default=None, + help="custom pattern selecting file paths to reformat " + "(case sensitive, overrides -iregex)", + ) + parser.add_argument( + "-iregex", + metavar="PATTERN", + default=r".*\.(?:cpp|cc|c\+\+|cxx|cppm|ccm|cxxm|c\+\+m|c|cl|h|hh|hpp" + r"|hxx|m|mm|inc|js|ts|proto|protodevel|java|cs|json|s?vh?)", + help="custom pattern selecting file paths to reformat " + "(case insensitive, overridden by -regex)", + ) + parser.add_argument( + "-sort-includes", + action="store_true", + default=False, + help="let clang-format sort include blocks", + ) + parser.add_argument( + "-v", + "--verbose", + action="store_true", + help="be more verbose, ineffective without -i", + ) + parser.add_argument( + "-style", + help="formatting style to apply (LLVM, GNU, Google, Chromium, " + "Microsoft, Mozilla, WebKit)", + ) + parser.add_argument( + "-fallback-style", + help="The name of the predefined style used as a" + "fallback in case clang-format is invoked with" + "-style=file, but can not find the .clang-format" + "file to use.", + ) + parser.add_argument( + "-binary", + default="clang-format", + help="location of binary to use for clang-format", + ) + args = parser.parse_args() + + # Extract changed lines for each file. 
+ filename = None + lines_by_file = {} + for line in sys.stdin: + match = re.search(r"^\+\+\+\ (.*?/){%s}(\S*)" % args.p, line) + if match: + filename = match.group(2) + if filename is None: + continue + + if args.regex is not None: + if not re.match("^%s$" % args.regex, filename): + continue + else: + if not re.match("^%s$" % args.iregex, filename, re.IGNORECASE): + continue + + match = re.search(r"^@@.*\+(\d+)(?:,(\d+))?", line) + if match: + start_line = int(match.group(1)) + line_count = 1 + if match.group(2): + line_count = int(match.group(2)) + # The input is something like + # + # @@ -1, +0,0 @@ + # + # which means no lines were added. + if line_count == 0: + continue + # Also format lines range if line_count is 0 in case of deleting + # surrounding statements. + end_line = start_line + if line_count != 0: + end_line += line_count - 1 + lines_by_file.setdefault(filename, []).extend( + ["-lines", str(start_line) + ":" + str(end_line)] + ) + + # Reformat files containing changes in place. + for filename, lines in lines_by_file.items(): + if args.i and args.verbose: + print("Formatting {}".format(filename)) + command = [args.binary, filename] + if args.i: + command.append("-i") + if args.sort_includes: + command.append("-sort-includes") + command.extend(lines) + if args.style: + command.extend(["-style", args.style]) + if args.fallback_style: + command.extend(["-fallback-style", args.fallback_style]) + + try: + p = subprocess.Popen( + command, + stdout=subprocess.PIPE, + stderr=None, + stdin=subprocess.PIPE, + universal_newlines=True, + ) + except OSError as e: + # Give the user more context when clang-format isn't + # found/isn't executable, etc. + raise RuntimeError( + 'Failed to run "%s" - %s"' % (" ".join(command), e.strerror) + ) + + stdout, stderr = p.communicate() + if p.returncode != 0: + sys.exit(p.returncode) + + if not args.i: + with open(filename, encoding="utf8") as f: + code = f.readlines() + formatted_code = StringIO(stdout).readlines() + diff = difflib.unified_diff( + code, + formatted_code, + filename, + filename, + "(before formatting)", + "(after formatting)", + ) + diff_string = "".join(diff) + if len(diff_string) > 0: + sys.stdout.write(diff_string) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/contrib/devtools/headerssync-params.py b/contrib/devtools/headerssync-params.py new file mode 100644 index 0000000000000..0198f5db99f8b --- /dev/null +++ b/contrib/devtools/headerssync-params.py @@ -0,0 +1,357 @@ +#!/usr/bin/env python3 +# Copyright (c) 2022 Pieter Wuille +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +"""Script to find the optimal parameters for the headerssync module through simulation.""" + +from math import log, exp, sqrt +from datetime import datetime, timedelta +import random + +# Parameters: + +# Aim for still working fine at some point in the future. [datetime] +TIME = datetime(2026, 10, 5) + +# Expected block interval. [timedelta] +BLOCK_INTERVAL = timedelta(seconds=600) + +# The number of headers corresponding to the minchainwork parameter. [headers] +MINCHAINWORK_HEADERS = 804000 + +# Combined processing bandwidth from all attackers to one victim. [bit/s] +# 6 Gbit/s is approximately the speed at which a single thread of a Ryzen 5950X CPU thread can hash +# headers. In practice, the victim's network bandwidth and network processing overheads probably +# impose a far lower number, but it's a useful upper bound. 
+ATTACK_BANDWIDTH = 6000000000 + +# How much additional permanent memory usage are attackers (jointly) allowed to cause in the victim, +# expressed as fraction of the normal memory usage due to mainchain growth, for the duration the +# attack is sustained. [unitless] +# 0.2 means that attackers, while they keep up the attack, can cause permanent memory usage due to +# headers storage to grow at 1.2 header per BLOCK_INTERVAL. +ATTACK_FRACTION = 0.2 + +# When this is set, the mapping from period size to memory usage (at optimal buffer size for that +# period) is assumed to be convex. This greatly speeds up the computation, and does not appear +# to influence the outcome. Set to False for a stronger guarantee to get the optimal result. +ASSUME_CONVEX = True + +# Explanation: +# +# The headerssync module implements a DoS protection against low-difficulty header spam which does +# not rely on checkpoints. In short it works as follows: +# +# - (initial) header synchronization is split into two phases: +# - A commitment phase, in which headers are downloaded from the peer, and a very compact +# commitment to them is remembered in per-peer memory. The commitment phase ends when the +# received chain's combined work reaches a predetermined threshold. +# - A redownload phase, during which the headers are downloaded a second time from the same peer, +# and compared against the commitment constructed in the first phase. If there is a match, the +# redownloaded headers are fed to validation and accepted into permanent storage. +# +# This separation guarantees that no headers are accepted into permanent storage without +# requiring the peer to first prove the chain actually has sufficient work. +# +# - To actually implement this commitment mechanism, the following approach is used: +# - Keep a *1 bit* commitment (constructed using a salted hash function), for every block whose +# height is a multiple of {period} plus an offset value. If RANDOMIZE_OFFSET, the offset, +# like the salt, is chosen randomly when the synchronization starts and kept fixed afterwards. +# - When redownloading, headers are fed through a per-peer queue that holds {bufsize} headers, +# before passing them to validation. All the headers in this queue are verified against the +# commitment bits created in the first phase before any header is released from it. This means +# {bufsize/period} bits are checked "on top of" each header before actually processing it, +# which results in a commitment structure with roughly {bufsize/period} bits of security, as +# once a header is modified, due to the prevhash inclusion, all future headers necessarily +# change as well. +# +# The question is what these {period} and {bufsize} parameters need to be set to. This program +# exhaustively tests a range of values to find the optimal choice, taking into account: +# +# - Minimizing the (maximum of) two scenarios that trigger per-peer memory usage: +# +# - When downloading a (likely honest) chain that reaches the chainwork threshold after {n} +# blocks, and then redownloads them, we will consume per-peer memory that is sufficient to +# store {n/period} commitment bits and {bufsize} headers. We only consider attackers without +# sufficient hashpower (as otherwise they are from a PoW perspective not attackers), which +# means {n} is restricted to the honest chain's length before reaching minchainwork. 
+# +# - When downloading a (likely false) chain of {n} headers that never reaches the chainwork +# threshold, we will consume per-peer memory that is sufficient to store {n/period} +# commitment bits. Such a chain may be very long, by exploiting the timewarp bug to avoid +# ramping up difficulty. There is however an absolute limit on how long such a chain can be: 6 +# blocks per second since genesis, due to the increasing MTP consensus rule. +# +# - Not gratuitously preventing synchronizing any valid chain, however difficult such a chain may +# be to construct. In particular, the above scenario with an enormous timewarp-expoiting chain +# cannot simply be ignored, as it is legal that the honest main chain is like that. We however +# do not bother minimizing the memory usage in that case (because a billion-header long honest +# chain will inevitably use far larger amounts of memory than designed for). +# +# - Keep the rate at which attackers can get low-difficulty headers accepted to the block index +# negligible. Specifically, the possibility exists for an attacker to send the honest main +# chain's headers during the commitment phase, but then start deviating at an attacker-chosen +# point by sending novel low-difficulty headers instead. Depending on how high we set the +# {bufsize/period} ratio, we can make the probability that such a header makes it in +# arbitrarily small, but at the cost of higher memory during the redownload phase. It turns out, +# some rate of memory usage growth is expected anyway due to chain growth, so permitting the +# attacker to increase that rate by a small factor isn't concerning. The attacker may start +# somewhat later than genesis, as long as the difficulty doesn't get too high. This reduces +# the attacker bandwidth required at the cost of higher PoW needed for constructing the +# alternate chain. This trade-off is ignored here, as it results in at most a small constant +# factor in attack rate. + + +# System properties: + +# Headers in the redownload buffer are stored without prevhash. [bits] +COMPACT_HEADER_SIZE = 48 * 8 + +# How many bits a header uses in P2P protocol. [bits] +NET_HEADER_SIZE = 81 * 8 + +# How many headers are sent at once. [headers] +HEADER_BATCH_COUNT = 2000 + +# Whether or not the offset of which blocks heights get checksummed is randomized. +RANDOMIZE_OFFSET = True + +# Timestamp of the genesis block +GENESIS_TIME = datetime(2009, 1, 3) + +# Derived values: + +# What rate of headers worth of RAM attackers are allowed to cause in the victim. [headers/s] +LIMIT_HEADERRATE = ATTACK_FRACTION / BLOCK_INTERVAL.total_seconds() + +# How many headers can attackers (jointly) send a victim per second. [headers/s] +NET_HEADERRATE = ATTACK_BANDWIDTH / NET_HEADER_SIZE + +# What fraction of headers sent by attackers can at most be accepted by a victim [unitless] +LIMIT_FRACTION = LIMIT_HEADERRATE / NET_HEADERRATE + +# How many headers we permit attackers to cause being accepted per attack. [headers/attack] +ATTACK_HEADERS = LIMIT_FRACTION * MINCHAINWORK_HEADERS + + +def find_max_headers(when): + """Compute the maximum number of headers a valid Bitcoin chain can have at given time.""" + # When exploiting the timewarp attack, this can be up to 6 per second since genesis. + return 6 * ((when - GENESIS_TIME) // timedelta(seconds=1)) + + +def lambert_w(value): + """Solve the equation x*exp(x)=value (x > 0, value > 0).""" + # Initial approximation. + approx = max(log(value), 0.0) + for _ in range(10): + # Newton-Rhapson iteration steps. 
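+        # For f(x) = x*exp(x) - value we have f'(x) = (x + 1)*exp(x), so the
+        # Newton step x -= f(x)/f'(x) reduces to the update below after
+        # multiplying numerator and denominator by exp(-x).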
+ approx += (value * exp(-approx) - approx) / (approx + 1.0) + return approx + + +def attack_rate(period, bufsize, limit=None): + """Compute maximal accepted headers per attack in (period, bufsize) configuration. + + If limit is provided, the computation is stopped early when the result is known to exceed the + value in limit. + """ + + max_rate = None + max_honest = None + # Let the current batch 0 being received be the first one in which the attacker starts lying. + # They will only ever start doing so right after a commitment block, but where that is can be + # in a number of places. Let honest be the number of honest headers in this current batch, + # preceding the forged ones. + for honest in range(HEADER_BATCH_COUNT): + # The number of headers the attack under consideration will on average get accepted. + # This is the number being computed. + rate = 0 + + # Iterate over the possible alignments of commitments w.r.t. the first batch. In case + # the alignments are randomized, try all values. If not, the attacker can know/choose + # the alignment, and will always start forging right after a commitment. + if RANDOMIZE_OFFSET: + align_choices = list(range(period)) + else: + align_choices = [(honest - 1) % period] + # Now loop over those possible alignment values, computing the average attack rate + # over them by dividing each contribution by len(align_choices). + for align in align_choices: + # These state variables capture the situation after receiving the first batch. + # - The number of headers received after the last commitment for an honest block: + after_good_commit = HEADER_BATCH_COUNT - honest + ((honest - align - 1) % period) + # - The number of forged headers in the redownload buffer: + forged_in_buf = HEADER_BATCH_COUNT - honest + + # Now iterate over the next batches of headers received, adding contributions to the + # rate variable. + while True: + # Process the first HEADER_BATCH_COUNT headers in the buffer: + accept_forged_headers = max(forged_in_buf - bufsize, 0) + forged_in_buf -= accept_forged_headers + if accept_forged_headers: + # The probability the attack has not been detected yet at this point: + prob = 0.5 ** (after_good_commit // period) + # Update attack rate, divided by align_choices to average over the alignments. + rate += accept_forged_headers * prob / len(align_choices) + # If this means we exceed limit, bail out early (performance optimization). + if limit is not None and rate >= limit: + return rate, None + # If the maximal term being added is negligible compared to rate, stop + # iterating. 
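+                    # From here on every incoming batch is fully forged, so
+                    # after_good_commit only grows and prob halves for every
+                    # additional {period} headers; the largest further term,
+                    # HEADER_BATCH_COUNT * prob / len(align_choices), is what
+                    # the cutoff below compares against rate.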
+ if HEADER_BATCH_COUNT * prob < 1.0e-16 * rate * len(align_choices): + break + # Update state from a new incoming batch (which is all forged) + after_good_commit += HEADER_BATCH_COUNT + forged_in_buf += HEADER_BATCH_COUNT + + if max_rate is None or rate > max_rate: + max_rate = rate + max_honest = honest + + return max_rate, max_honest + + +def memory_usage(period, bufsize, when): + """How much memory (max,mainchain,timewarp) does the (period,bufsize) configuration need?""" + + # Per-peer memory usage for a timewarp chain that never meets minchainwork + mem_timewarp = find_max_headers(when) // period + # Per-peer memory usage for being fed the main chain + mem_mainchain = (MINCHAINWORK_HEADERS // period) + bufsize * COMPACT_HEADER_SIZE + # Maximum per-peer memory usage + max_mem = max(mem_timewarp, mem_mainchain) + + return max_mem, mem_mainchain, mem_timewarp + +def find_bufsize(period, attack_headers, when, max_mem=None, min_bufsize=1): + """Determine how big bufsize needs to be given a specific period length. + + Given a period, find the smallest value of bufsize such that the attack rate against the + (period, bufsize) configuration is below attack_headers. If max_mem is provided, and no + such bufsize exists that needs less than max_mem bits of memory, None is returned. + min_bufsize is the minimal result to be considered.""" + + if max_mem is None: + succ_buf = min_bufsize - 1 + fail_buf = min_bufsize + # First double iteratively until an upper bound for failure is found. + while True: + if attack_rate(period, fail_buf, attack_headers)[0] < attack_headers: + break + succ_buf, fail_buf = fail_buf, 3 * fail_buf - 2 * succ_buf + else: + # If a long low-work header chain exists that exceeds max_mem already, give up. + if find_max_headers(when) // period > max_mem: + return None + # Otherwise, verify that the maximal buffer size that permits a mainchain sync with less + # than max_mem memory is sufficient to get the attack rate below attack_headers. If not, + # also give up. + max_buf = (max_mem - (MINCHAINWORK_HEADERS // period)) // COMPACT_HEADER_SIZE + if max_buf < min_bufsize: + return None + if attack_rate(period, max_buf, attack_headers)[0] >= attack_headers: + return None + # If it is sufficient, that's an upper bound to start our search. + succ_buf = min_bufsize - 1 + fail_buf = max_buf + + # Then perform a bisection search to narrow it down. + while fail_buf > succ_buf + 1: + try_buf = (succ_buf + fail_buf) // 2 + if attack_rate(period, try_buf, attack_headers)[0] >= attack_headers: + succ_buf = try_buf + else: + fail_buf = try_buf + return fail_buf + + +def optimize(when): + """Find the best (period, bufsize) configuration.""" + + # When period*bufsize = memory_scale, the per-peer memory for a mainchain sync and a maximally + # long low-difficulty header sync are equal. + memory_scale = (find_max_headers(when) - MINCHAINWORK_HEADERS) / COMPACT_HEADER_SIZE + # Compute approximation for {bufsize/period}, using a formula for a simplified problem. + approx_ratio = lambert_w(log(4) * memory_scale / ATTACK_HEADERS**2) / log(4) + # Use those for a first attempt. 
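+    # With a target ratio r ~= bufsize/period, the balance point
+    # period*bufsize = memory_scale becomes r*period**2 = memory_scale,
+    # hence the square root below for the initial period guess.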
+ print("Searching configurations:") + period = int(sqrt(memory_scale / approx_ratio) + 0.5) + bufsize = find_bufsize(period, ATTACK_HEADERS, when) + mem = memory_usage(period, bufsize, when) + best = (period, bufsize, mem) + maps = [(period, bufsize), (MINCHAINWORK_HEADERS + 1, None)] + print(f"- Initial: period={period}, buffer={bufsize}, mem={mem[0] / 8192:.3f} KiB") + + # Consider all period values between 1 and MINCHAINWORK_HEADERS, except the one just tried. + periods = [iv for iv in range(1, MINCHAINWORK_HEADERS + 1) if iv != period] + # Iterate, picking a random element from periods, computing its corresponding bufsize, and + # then using the result to shrink the period. + while True: + # Remove all periods whose memory usage for low-work long chain sync exceed the best + # memory usage we've found so far. + periods = [p for p in periods if find_max_headers(when) // p < best[2][0]] + # Stop if there is nothing left to try. + if len(periods) == 0: + break + # Pick a random remaining option for period size, and compute corresponding bufsize. + period = periods.pop(random.randrange(len(periods))) + # The buffer size (at a given attack level) cannot shrink as the period grows. Find the + # largest period smaller than the selected one we know the buffer size for, and use that + # as a lower bound to find_bufsize. + min_bufsize = max([(p, b) for p, b in maps if p < period] + [(0,0)])[1] + bufsize = find_bufsize(period, ATTACK_HEADERS, when, best[2][0], min_bufsize) + if bufsize is not None: + # We found a (period, bufsize) configuration with better memory usage than our best + # so far. Remember it for future lower bounds. + maps.append((period, bufsize)) + mem = memory_usage(period, bufsize, when) + assert mem[0] <= best[2][0] + if ASSUME_CONVEX: + # Remove all periods that are on the other side of the former best as the new + # best. + periods = [p for p in periods if (p < best[0]) == (period < best[0])] + best = (period, bufsize, mem) + print(f"- New best: period={period}, buffer={bufsize}, mem={mem[0] / 8192:.3f} KiB") + else: + # The (period, bufsize) configuration we found is worse than what we already had. + if ASSUME_CONVEX: + # Remove all periods that are on the other side of the tried configuration as the + # best one. + periods = [p for p in periods if (p < period) == (best[0] < period)] + + # Return the result. + period, bufsize, _ = best + return period, bufsize + + +def analyze(when): + """Find the best configuration and print it out.""" + + period, bufsize = optimize(when) + # Compute accurate statistics for the best found configuration. + _, mem_mainchain, mem_timewarp = memory_usage(period, bufsize, when) + headers_per_attack, _ = attack_rate(period, bufsize) + attack_volume = NET_HEADER_SIZE * MINCHAINWORK_HEADERS + # And report them. + print() + print("Optimal configuration:") + print() + print("//! Store one header commitment per HEADER_COMMITMENT_PERIOD blocks.") + print(f"constexpr size_t HEADER_COMMITMENT_PERIOD{{{period}}};") + print() + print("//! Only feed headers to validation once this many headers on top have been") + print("//! 
received and validated against commitments.") + print(f"constexpr size_t REDOWNLOAD_BUFFER_SIZE{{{bufsize}}};" + f" // {bufsize}/{period} = ~{bufsize/period:.1f} commitments") + print() + print("Properties:") + print(f"- Per-peer memory for mainchain sync: {mem_mainchain / 8192:.3f} KiB") + print(f"- Per-peer memory for timewarp attack: {mem_timewarp / 8192:.3f} KiB") + print(f"- Attack rate: {1/headers_per_attack:.1f} attacks for 1 header of memory growth") + print(f" (where each attack costs {attack_volume / 8388608:.3f} MiB bandwidth)") + + +analyze(TIME) diff --git a/contrib/devtools/iwyu/bitcoin.core.imp b/contrib/devtools/iwyu/bitcoin.core.imp index 919ffab102ddb..befc949f18ca8 100644 --- a/contrib/devtools/iwyu/bitcoin.core.imp +++ b/contrib/devtools/iwyu/bitcoin.core.imp @@ -1,7 +1,4 @@ # Fixups / upstreamed changes [ - { include: [ "", private, "", public ] }, - { include: [ "", private, "", public ] }, - { include: [ "", private, "", public ] }, { include: [ "", private, "", public ] }, ] diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py index 6cd022ef1710e..f57e9abfeca1c 100755 --- a/contrib/devtools/security-check.py +++ b/contrib/devtools/security-check.py @@ -8,9 +8,8 @@ Otherwise the exit status will be 1 and it will log which executables failed which checks. ''' import sys -from typing import List -import lief #type:ignore +import lief def check_ELF_RELRO(binary) -> bool: ''' @@ -113,7 +112,7 @@ def check_ELF_control_flow(binary) -> bool: main = binary.get_function_address('main') content = binary.get_content_from_virtual_address(main, 4, lief.Binary.VA_TYPES.AUTO) - if content == [243, 15, 30, 250]: # endbr64 + if content.tolist() == [243, 15, 30, 250]: # endbr64 return True return False @@ -142,7 +141,7 @@ def check_PE_control_flow(binary) -> bool: content = binary.get_content_from_virtual_address(virtual_address, 4, lief.Binary.VA_TYPES.VA) - if content == [243, 15, 30, 250]: # endbr64 + if content.tolist() == [243, 15, 30, 250]: # endbr64 return True return False @@ -158,12 +157,11 @@ def check_MACHO_NOUNDEFS(binary) -> bool: ''' return binary.header.has(lief.MachO.HEADER_FLAGS.NOUNDEFS) -def check_MACHO_LAZY_BINDINGS(binary) -> bool: +def check_MACHO_FIXUP_CHAINS(binary) -> bool: ''' - Check for no lazy bindings. - We don't use or check for MH_BINDATLOAD. See #18295. + Check for use of chained fixups. 
''' - return binary.dyld_info.lazy_bind == (0,0) + return binary.has_dyld_chained_fixups def check_MACHO_Canary(binary) -> bool: ''' @@ -190,7 +188,17 @@ def check_MACHO_control_flow(binary) -> bool: ''' content = binary.get_content_from_virtual_address(binary.entrypoint, 4, lief.Binary.VA_TYPES.AUTO) - if content == [243, 15, 30, 250]: # endbr64 + if content.tolist() == [243, 15, 30, 250]: # endbr64 + return True + return False + +def check_MACHO_branch_protection(binary) -> bool: + ''' + Check for branch protection instrumentation + ''' + content = binary.get_content_from_virtual_address(binary.entrypoint, 4, lief.Binary.VA_TYPES.AUTO) + + if content.tolist() == [95, 36, 3, 213]: # bti return True return False @@ -214,8 +222,8 @@ def check_MACHO_control_flow(binary) -> bool: BASE_MACHO = [ ('NOUNDEFS', check_MACHO_NOUNDEFS), - ('LAZY_BINDINGS', check_MACHO_LAZY_BINDINGS), ('Canary', check_MACHO_Canary), + ('FIXUP_CHAINS', check_MACHO_FIXUP_CHAINS), ] CHECKS = { @@ -233,7 +241,7 @@ def check_MACHO_control_flow(binary) -> bool: lief.ARCHITECTURES.X86: BASE_MACHO + [('PIE', check_PIE), ('NX', check_NX), ('CONTROL_FLOW', check_MACHO_control_flow)], - lief.ARCHITECTURES.ARM64: BASE_MACHO, + lief.ARCHITECTURES.ARM64: BASE_MACHO + [('BRANCH_PROTECTION', check_MACHO_branch_protection)], } } @@ -256,7 +264,7 @@ def check_MACHO_control_flow(binary) -> bool: retval = 1 continue - failed: List[str] = [] + failed: list[str] = [] for (name, func) in CHECKS[etype][arch]: if not func(binary): failed.append(name) diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py index 3507f954f3665..b3e73bb2b9170 100755 --- a/contrib/devtools/symbol-check.py +++ b/contrib/devtools/symbol-check.py @@ -11,9 +11,8 @@ find ../path/to/binaries -type f -executable | xargs python3 contrib/devtools/symbol-check.py ''' import sys -from typing import List, Dict -import lief #type:ignore +import lief # Debian 10 (Buster) EOL: 2024. https://wiki.debian.org/LTS # @@ -33,7 +32,7 @@ # See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html for more info. 
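+# Highest allowed version of versioned symbols per namespace (and, for glibc,
+# per architecture); importing anything newer than the values below (e.g.
+# GLIBC_2.28 on x86_64) causes the symbol-version check to fail.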
MAX_VERSIONS = { -'GCC': (4,8,0), +'GCC': (4,3,0), 'GLIBC': { lief.ELF.ARCH.x86_64: (2,27), lief.ELF.ARCH.ARM: (2,27), @@ -53,7 +52,7 @@ # Expected linker-loader names can be found here: # https://sourceware.org/glibc/wiki/ABIList?action=recall&rev=16 -ELF_INTERPRETER_NAMES: Dict[lief.ELF.ARCH, Dict[lief.ENDIANNESS, str]] = { +ELF_INTERPRETER_NAMES: dict[lief.ELF.ARCH, dict[lief.ENDIANNESS, str]] = { lief.ELF.ARCH.x86_64: { lief.ENDIANNESS.LITTLE: "/lib64/ld-linux-x86-64.so.2", }, @@ -72,7 +71,7 @@ }, } -ELF_ABIS: Dict[lief.ELF.ARCH, Dict[lief.ENDIANNESS, List[int]]] = { +ELF_ABIS: dict[lief.ELF.ARCH, dict[lief.ENDIANNESS, list[int]]] = { lief.ELF.ARCH.x86_64: { lief.ENDIANNESS.LITTLE: [3,2,0], }, @@ -98,7 +97,6 @@ 'libc.so.6', # C library 'libpthread.so.0', # threading 'libm.so.6', # math library -'librt.so.1', # real-time (clock) 'libatomic.so.1', 'ld-linux-x86-64.so.2', # 64-bit dynamic linker 'ld-linux.so.2', # 32-bit dynamic linker @@ -158,21 +156,21 @@ 'KERNEL32.dll', # win32 base APIs 'msvcrt.dll', # C standard library for MSVC 'SHELL32.dll', # shell API -'USER32.dll', # user interface 'WS2_32.dll', # sockets # bitcoin-qt only 'dwmapi.dll', # desktop window manager 'GDI32.dll', # graphics device interface 'IMM32.dll', # input method editor -'NETAPI32.dll', +'NETAPI32.dll', # network management 'ole32.dll', # component object model 'OLEAUT32.dll', # OLE Automation API 'SHLWAPI.dll', # light weight shell API -'USERENV.dll', -'UxTheme.dll', +'USER32.dll', # user interface +'USERENV.dll', # user management +'UxTheme.dll', # visual style 'VERSION.dll', # version checking 'WINMM.dll', # WinMM audio API -'WTSAPI32.dll', +'WTSAPI32.dll', # Remote Desktop } def check_version(max_versions, version, arch) -> bool: @@ -232,12 +230,17 @@ def check_MACHO_libraries(binary) -> bool: return ok def check_MACHO_min_os(binary) -> bool: - if binary.build_version.minos == [10,15,0]: + if binary.build_version.minos == [11,0,0]: return True return False def check_MACHO_sdk(binary) -> bool: - if binary.build_version.sdk == [11, 0, 0]: + if binary.build_version.sdk == [14, 0, 0]: + return True + return False + +def check_MACHO_ld64(binary) -> bool: + if binary.build_version.tools[0].version == [711, 0, 0]: return True return False @@ -279,6 +282,7 @@ def check_ELF_ABI(binary) -> bool: ('DYNAMIC_LIBRARIES', check_MACHO_libraries), ('MIN_OS', check_MACHO_min_os), ('SDK', check_MACHO_sdk), + ('LD64', check_MACHO_ld64), ], lief.EXE_FORMATS.PE: [ ('DYNAMIC_LIBRARIES', check_PE_libraries), @@ -297,7 +301,7 @@ def check_ELF_ABI(binary) -> bool: retval = 1 continue - failed: List[str] = [] + failed: list[str] = [] for (name, func) in CHECKS[etype]: if not func(binary): failed.append(name) diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py index 54718fd7a1f84..48823c7e45819 100755 --- a/contrib/devtools/test-security-check.py +++ b/contrib/devtools/test-security-check.py @@ -5,10 +5,9 @@ ''' Test script for security-check.py ''' -import lief #type:ignore +import lief import os import subprocess -from typing import List import unittest from utils import determine_wellknown_cmd @@ -28,13 +27,13 @@ def clean_files(source, executable): os.remove(source) os.remove(executable) -def call_security_check(cc, source, executable, options): +def call_security_check(cc: str, source: str, executable: str, options) -> tuple: # This should behave the same as AC_TRY_LINK, so arrange well-known flags # in the same order as autoconf would. 
# # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for # reference. - env_flags: List[str] = [] + env_flags: list[str] = [] for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']: env_flags += filter(None, os.environ.get(var, '').split(' ')) @@ -119,29 +118,31 @@ def test_MACHO(self): arch = get_arch(cc, source, executable) if arch == lief.ARCHITECTURES.X86: - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector']), - (1, executable+': failed NOUNDEFS LAZY_BINDINGS Canary PIE NX CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fstack-protector-all']), - (1, executable+': failed NOUNDEFS LAZY_BINDINGS PIE NX CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fstack-protector-all']), - (1, executable+': failed NOUNDEFS LAZY_BINDINGS PIE CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all']), - (1, executable+': failed LAZY_BINDINGS PIE CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all']), + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector', '-Wl,-no_fixup_chains']), + (1, executable+': failed NOUNDEFS Canary FIXUP_CHAINS PIE NX CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector', '-Wl,-fixup_chains']), + (1, executable+': failed NOUNDEFS Canary PIE NX CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fstack-protector-all', '-Wl,-fixup_chains']), + (1, executable+': failed NOUNDEFS PIE NX CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fstack-protector-all', '-Wl,-fixup_chains']), + (1, executable+': failed NOUNDEFS PIE CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all', '-Wl,-fixup_chains']), (1, executable+': failed PIE CONTROL_FLOW')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']), + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all', '-Wl,-fixup_chains']), + (1, executable+': failed PIE CONTROL_FLOW')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all', '-fcf-protection=full', '-Wl,-fixup_chains']), (1, executable+': failed PIE')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']), + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-pie','-fstack-protector-all', '-fcf-protection=full', '-Wl,-fixup_chains']), (0, '')) else: # arm64 darwin doesn't support non-PIE binaries, control flow or executable stacks - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fno-stack-protector']), - (1, executable+': failed NOUNDEFS LAZY_BINDINGS Canary')) - self.assertEqual(call_security_check(cc, source, 
executable, ['-Wl,-flat_namespace','-fstack-protector-all']), - (1, executable+': failed NOUNDEFS LAZY_BINDINGS')) - self.assertEqual(call_security_check(cc, source, executable, ['-fstack-protector-all']), - (1, executable+': failed LAZY_BINDINGS')) - self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-bind_at_load','-fstack-protector-all']), + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fno-stack-protector', '-Wl,-no_fixup_chains']), + (1, executable+': failed NOUNDEFS Canary FIXUP_CHAINS BRANCH_PROTECTION')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fno-stack-protector', '-Wl,-fixup_chains', '-mbranch-protection=bti']), + (1, executable+': failed NOUNDEFS Canary')) + self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fstack-protector-all', '-Wl,-fixup_chains', '-mbranch-protection=bti']), + (1, executable+': failed NOUNDEFS')) + self.assertEqual(call_security_check(cc, source, executable, ['-fstack-protector-all', '-Wl,-fixup_chains', '-mbranch-protection=bti']), (0, '')) diff --git a/contrib/devtools/test-symbol-check.py b/contrib/devtools/test-symbol-check.py index e304880140e6f..0140decb25fdd 100755 --- a/contrib/devtools/test-symbol-check.py +++ b/contrib/devtools/test-symbol-check.py @@ -7,18 +7,17 @@ ''' import os import subprocess -from typing import List import unittest from utils import determine_wellknown_cmd -def call_symbol_check(cc: List[str], source, executable, options): +def call_symbol_check(cc: list[str], source, executable, options): # This should behave the same as AC_TRY_LINK, so arrange well-known flags # in the same order as autoconf would. # # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for # reference. - env_flags: List[str] = [] + env_flags: list[str] = [] for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']: env_flags += filter(None, os.environ.get(var, '').split(' ')) @@ -28,7 +27,7 @@ def call_symbol_check(cc: List[str], source, executable, options): os.remove(executable) return (p.returncode, p.stdout.rstrip()) -def get_machine(cc: List[str]): +def get_machine(cc: list[str]): p = subprocess.run([*cc,'-dumpmachine'], stdout=subprocess.PIPE, text=True) return p.stdout.rstrip() @@ -121,7 +120,7 @@ def test_MACHO(self): } ''') - self.assertEqual(call_symbol_check(cc, source, executable, ['-Wl,-platform_version','-Wl,macos', '-Wl,10.15', '-Wl,11.4']), + self.assertEqual(call_symbol_check(cc, source, executable, ['-Wl,-platform_version','-Wl,macos', '-Wl,11.0', '-Wl,11.4']), (1, f'{executable}: failed SDK')) def test_PE(self): diff --git a/contrib/devtools/test_utxo_snapshots.sh b/contrib/devtools/test_utxo_snapshots.sh new file mode 100755 index 0000000000000..93a4cd1683e1a --- /dev/null +++ b/contrib/devtools/test_utxo_snapshots.sh @@ -0,0 +1,209 @@ +#!/usr/bin/env bash +# Demonstrate the creation and usage of UTXO snapshots. +# +# A server node starts up, IBDs up to a certain height, then generates a UTXO +# snapshot at that point. +# +# The server then downloads more blocks (to create a diff from the snapshot). +# +# We bring a client up, load the UTXO snapshot, and we show the client sync to +# the "network tip" and then start a background validation of the snapshot it +# loaded. We see the background validation chainstate removed after validation +# completes. 
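+#
+# Usage: test_utxo_snapshots.sh [base-height]
+#
+# base-height defaults to 30000 (BASE_HEIGHT below); the demo then syncs
+# INCREMENTAL_HEIGHT (20000) additional blocks on top of the snapshot.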
+# +# The shellcheck rule SC2086 (quoted variables) disablements are necessary +# since this rule needs to be violated in order to get bitcoind to pick up on +# $EARLY_IBD_FLAGS for the script to work. + +export LC_ALL=C +set -e + +BASE_HEIGHT=${1:-30000} +INCREMENTAL_HEIGHT=20000 +FINAL_HEIGHT=$((BASE_HEIGHT + INCREMENTAL_HEIGHT)) + +SERVER_DATADIR="$(pwd)/utxodemo-data-server-$BASE_HEIGHT" +CLIENT_DATADIR="$(pwd)/utxodemo-data-client-$BASE_HEIGHT" +UTXO_DAT_FILE="$(pwd)/utxo.$BASE_HEIGHT.dat" + +# Chosen to try to not interfere with any running bitcoind processes. +SERVER_PORT=8633 +SERVER_RPC_PORT=8632 + +CLIENT_PORT=8733 +CLIENT_RPC_PORT=8732 + +SERVER_PORTS="-port=${SERVER_PORT} -rpcport=${SERVER_RPC_PORT}" +CLIENT_PORTS="-port=${CLIENT_PORT} -rpcport=${CLIENT_RPC_PORT}" + +# Ensure the client exercises all indexes to test that snapshot use works +# properly with indexes. +ALL_INDEXES="-txindex -coinstatsindex -blockfilterindex=1" + +if ! command -v jq >/dev/null ; then + echo "This script requires jq to parse JSON RPC output. Please install it." + echo "(e.g. sudo apt install jq)" + exit 1 +fi + +DUMP_OUTPUT="dumptxoutset-output-$BASE_HEIGHT.json" + +finish() { + echo + echo "Killing server and client PIDs ($SERVER_PID, $CLIENT_PID) and cleaning up datadirs" + echo + rm -f "$UTXO_DAT_FILE" "$DUMP_OUTPUT" + rm -rf "$SERVER_DATADIR" "$CLIENT_DATADIR" + kill -9 "$SERVER_PID" "$CLIENT_PID" +} + +trap finish EXIT + +# Need to specify these to trick client into accepting server as a peer +# it can IBD from, otherwise the default values prevent IBD from the server node. +EARLY_IBD_FLAGS="-maxtipage=9223372036854775207 -minimumchainwork=0x00" + +server_rpc() { + ./src/bitcoin-cli -rpcport=$SERVER_RPC_PORT -datadir="$SERVER_DATADIR" "$@" +} +client_rpc() { + ./src/bitcoin-cli -rpcport=$CLIENT_RPC_PORT -datadir="$CLIENT_DATADIR" "$@" +} +server_sleep_til_boot() { + while ! server_rpc ping >/dev/null 2>&1; do sleep 0.1; done +} +client_sleep_til_boot() { + while ! client_rpc ping >/dev/null 2>&1; do sleep 0.1; done +} +server_sleep_til_shutdown() { + while server_rpc ping >/dev/null 2>&1; do sleep 0.1; done +} + +mkdir -p "$SERVER_DATADIR" "$CLIENT_DATADIR" + +echo "Hi, welcome to the assumeutxo demo/test" +echo +echo "We're going to" +echo +echo " - start up a 'server' node, sync it via mainnet IBD to height ${BASE_HEIGHT}" +echo " - create a UTXO snapshot at that height" +echo " - IBD ${INCREMENTAL_HEIGHT} more blocks on top of that" +echo +echo "then we'll demonstrate assumeutxo by " +echo +echo " - starting another node (the 'client') and loading the snapshot in" +echo " * first you'll have to modify the code slightly (chainparams) and recompile" +echo " * don't worry, we'll make it easy" +echo " - observing the client sync ${INCREMENTAL_HEIGHT} blocks on top of the snapshot from the server" +echo " - observing the client validate the snapshot chain via background IBD" +echo +read -p "Press [enter] to continue" _ + +echo +echo "-- Starting the demo. You might want to run the two following commands in" +echo " separate terminal windows:" +echo +echo " watch -n0.1 tail -n 30 $SERVER_DATADIR/debug.log" +echo " watch -n0.1 tail -n 30 $CLIENT_DATADIR/debug.log" +echo +read -p "Press [enter] to continue" _ + +echo +echo "-- IBDing the blocks (height=$BASE_HEIGHT) required to the server node..." 
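+# -stopatheight makes bitcoind shut down on its own once it reaches
+# BASE_HEIGHT, so this first (non-backgrounded) invocation only returns after
+# the server datadir contains the blocks needed for the snapshot.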
+# shellcheck disable=SC2086 +./src/bitcoind -logthreadnames=1 $SERVER_PORTS \ + -datadir="$SERVER_DATADIR" $EARLY_IBD_FLAGS -stopatheight="$BASE_HEIGHT" >/dev/null + +echo +echo "-- Creating snapshot at ~ height $BASE_HEIGHT ($UTXO_DAT_FILE)..." +server_sleep_til_shutdown # wait for stopatheight to be hit +# shellcheck disable=SC2086 +./src/bitcoind -logthreadnames=1 $SERVER_PORTS \ + -datadir="$SERVER_DATADIR" $EARLY_IBD_FLAGS -connect=0 -listen=0 >/dev/null & +SERVER_PID="$!" + +server_sleep_til_boot +server_rpc dumptxoutset "$UTXO_DAT_FILE" > "$DUMP_OUTPUT" +cat "$DUMP_OUTPUT" +kill -9 "$SERVER_PID" + +RPC_BASE_HEIGHT=$(jq -r .base_height < "$DUMP_OUTPUT") +RPC_AU=$(jq -r .txoutset_hash < "$DUMP_OUTPUT") +RPC_NCHAINTX=$(jq -r .nchaintx < "$DUMP_OUTPUT") +RPC_BLOCKHASH=$(jq -r .base_hash < "$DUMP_OUTPUT") + +server_sleep_til_shutdown + +echo +echo "-- Now: add the following to CMainParams::m_assumeutxo_data" +echo " in src/kernel/chainparams.cpp, and recompile:" +echo +echo " {${RPC_BASE_HEIGHT}, AssumeutxoHash{uint256S(\"0x${RPC_AU}\")}, ${RPC_NCHAINTX}, uint256S(\"0x${RPC_BLOCKHASH}\")}," +echo +echo +echo "-- IBDing more blocks to the server node (height=$FINAL_HEIGHT) so there is a diff between snapshot and tip..." +# shellcheck disable=SC2086 +./src/bitcoind $SERVER_PORTS -logthreadnames=1 -datadir="$SERVER_DATADIR" \ + $EARLY_IBD_FLAGS -stopatheight="$FINAL_HEIGHT" >/dev/null + +echo +echo "-- Starting the server node to provide blocks to the client node..." +# shellcheck disable=SC2086 +./src/bitcoind $SERVER_PORTS -logthreadnames=1 -debug=net -datadir="$SERVER_DATADIR" \ + $EARLY_IBD_FLAGS -connect=0 -listen=1 >/dev/null & +SERVER_PID="$!" +server_sleep_til_boot + +echo +echo "-- Okay, what you're about to see is the client starting up and activating the snapshot." +echo " I'm going to display the top 14 log lines from the client on top of an RPC called" +echo " getchainstates, which is like getblockchaininfo but for both the snapshot and " +echo " background validation chainstates." +echo +echo " You're going to first see the snapshot chainstate sync to the server's tip, then" +echo " the background IBD chain kicks in to validate up to the base of the snapshot." +echo +echo " Once validation of the snapshot is done, you should see log lines indicating" +echo " that we've deleted the background validation chainstate." +echo +echo " Once everything completes, exit the watch command with CTRL+C." +echo +read -p "When you're ready for all this, hit [enter]" _ + +echo +echo "-- Starting the client node to get headers from the server, then load the snapshot..." +# shellcheck disable=SC2086 +./src/bitcoind $CLIENT_PORTS $ALL_INDEXES -logthreadnames=1 -datadir="$CLIENT_DATADIR" \ + -connect=0 -addnode=127.0.0.1:$SERVER_PORT -debug=net $EARLY_IBD_FLAGS >/dev/null & +CLIENT_PID="$!" +client_sleep_til_boot + +echo +echo "-- Initial state of the client:" +client_rpc getchainstates + +echo +echo "-- Loading UTXO snapshot into client..." +client_rpc loadtxoutset "$UTXO_DAT_FILE" + +watch -n 0.3 "( tail -n 14 $CLIENT_DATADIR/debug.log ; echo ; ./src/bitcoin-cli -rpcport=$CLIENT_RPC_PORT -datadir=$CLIENT_DATADIR getchainstates) | cat" + +echo +echo "-- Okay, now I'm going to restart the client to make sure that the snapshot chain reloads " +echo " as the main chain properly..." 
+echo +echo " Press CTRL+C after you're satisfied to exit the demo" +echo +read -p "Press [enter] to continue" + +client_sleep_til_boot +# shellcheck disable=SC2086 +./src/bitcoind $CLIENT_PORTS $ALL_INDEXES -logthreadnames=1 -datadir="$CLIENT_DATADIR" -connect=0 \ + -addnode=127.0.0.1:$SERVER_PORT "$EARLY_IBD_FLAGS" >/dev/null & +CLIENT_PID="$!" +client_sleep_til_boot + +watch -n 0.3 "( tail -n 14 $CLIENT_DATADIR/debug.log ; echo ; ./src/bitcoin-cli -rpcport=$CLIENT_RPC_PORT -datadir=$CLIENT_DATADIR getchainstates) | cat" + +echo +echo "-- Done!" diff --git a/contrib/devtools/utils.py b/contrib/devtools/utils.py index 68ad1c3aba191..8b4c67c6c0ce2 100755 --- a/contrib/devtools/utils.py +++ b/contrib/devtools/utils.py @@ -8,10 +8,9 @@ import shutil import sys import os -from typing import List -def determine_wellknown_cmd(envvar, progname) -> List[str]: +def determine_wellknown_cmd(envvar, progname) -> list[str]: maybe_env = os.getenv(envvar) maybe_which = shutil.which(progname) if maybe_env: diff --git a/contrib/devtools/utxo_snapshot.sh b/contrib/devtools/utxo_snapshot.sh index dee25ff67b61e..fbb8591965fd0 100755 --- a/contrib/devtools/utxo_snapshot.sh +++ b/contrib/devtools/utxo_snapshot.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# Copyright (c) 2019 The Bitcoin Core developers +# Copyright (c) 2019-2023 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # @@ -8,6 +8,8 @@ export LC_ALL=C set -ueo pipefail +NETWORK_DISABLED=false + if (( $# < 3 )); then echo 'Usage: utxo_snapshot.sh ' echo @@ -26,19 +28,67 @@ OUTPUT_PATH="${1}"; shift; # Most of the calls we make take a while to run, so pad with a lengthy timeout. BITCOIN_CLI_CALL="${*} -rpcclienttimeout=9999999" +# Check if the node is pruned and get the pruned block height +PRUNED=$( ${BITCOIN_CLI_CALL} getblockchaininfo | awk '/pruneheight/ {print $2}' | tr -d ',' ) + +if (( GENERATE_AT_HEIGHT < PRUNED )); then + echo "Error: The requested snapshot height (${GENERATE_AT_HEIGHT}) should be greater than the pruned block height (${PRUNED})." + exit 1 +fi + +# Early exit if file at OUTPUT_PATH already exists +if [[ -e "$OUTPUT_PATH" ]]; then + (>&2 echo "Error: $OUTPUT_PATH already exists or is not a valid path.") + exit 1 +fi + +# Validate that the path is correct +if [[ "${OUTPUT_PATH}" != "-" && ! -d "$(dirname "${OUTPUT_PATH}")" ]]; then + (>&2 echo "Error: The directory $(dirname "${OUTPUT_PATH}") does not exist.") + exit 1 +fi + +function cleanup { + (>&2 echo "Restoring chain to original height; this may take a while") + ${BITCOIN_CLI_CALL} reconsiderblock "${PIVOT_BLOCKHASH}" + + if $NETWORK_DISABLED; then + (>&2 echo "Restoring network activity") + ${BITCOIN_CLI_CALL} setnetworkactive true + fi +} + +function early_exit { + (>&2 echo "Exiting due to Ctrl-C") + cleanup + exit 1 +} + +# Prompt the user to disable network activity +read -p "Do you want to disable network activity (setnetworkactive false) before running invalidateblock? (Y/n): " -r +if [[ "$REPLY" =~ ^[Yy]*$ || -z "$REPLY" ]]; then + # User input is "Y", "y", or Enter key, proceed with the action + NETWORK_DISABLED=true + (>&2 echo "Disabling network activity") + ${BITCOIN_CLI_CALL} setnetworkactive false +else + (>&2 echo "Network activity remains enabled") +fi + # Block we'll invalidate/reconsider to rewind/fast-forward the chain. 
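+# Invalidating the block at GENERATE_AT_HEIGHT + 1 rolls the node's tip back to
+# GENERATE_AT_HEIGHT, so gettxoutsetinfo/dumptxoutset below see the UTXO set as
+# of exactly that height; cleanup() reconsiders the block afterwards to restore
+# the original chain.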
PIVOT_BLOCKHASH=$($BITCOIN_CLI_CALL getblockhash $(( GENERATE_AT_HEIGHT + 1 )) ) +# Trap for normal exit and Ctrl-C +trap cleanup EXIT +trap early_exit INT + (>&2 echo "Rewinding chain back to height ${GENERATE_AT_HEIGHT} (by invalidating ${PIVOT_BLOCKHASH}); this may take a while") ${BITCOIN_CLI_CALL} invalidateblock "${PIVOT_BLOCKHASH}" if [[ "${OUTPUT_PATH}" = "-" ]]; then (>&2 echo "Generating txoutset info...") - ${BITCOIN_CLI_CALL} gettxoutsetinfo | grep hash_serialized_2 | sed 's/^.*: "\(.\+\)\+",/\1/g' + ${BITCOIN_CLI_CALL} gettxoutsetinfo | grep hash_serialized_3 | sed 's/^.*: "\(.\+\)\+",/\1/g' else (>&2 echo "Generating UTXO snapshot...") ${BITCOIN_CLI_CALL} dumptxoutset "${OUTPUT_PATH}" fi - -(>&2 echo "Restoring chain to original height; this may take a while") -${BITCOIN_CLI_CALL} reconsiderblock "${PIVOT_BLOCKHASH}" diff --git a/contrib/guix/INSTALL.md b/contrib/guix/INSTALL.md index bbd88e58f3dc9..35ea83e585ba3 100644 --- a/contrib/guix/INSTALL.md +++ b/contrib/guix/INSTALL.md @@ -62,9 +62,6 @@ so you should log out and log back in. Please refer to fanquake's instructions [here](https://github.com/fanquake/core-review/tree/master/guix). -Note that the `Dockerfile` is largely equivalent to running through the binary -tarball installation steps. - ## Option 4: Using a distribution-maintained package Note that this section is based on the distro packaging situation at the time of @@ -74,25 +71,15 @@ https://repology.org/project/guix/versions ### Debian / Ubuntu -Guix v1.2.0 is available as a distribution package starting in [Debian -11](https://packages.debian.org/bullseye/guix) and [Ubuntu -21.04](https://packages.ubuntu.com/search?keywords=guix). - -Note that if you intend on using Guix without using any substitutes (more -details [here][security-model]), v1.2.0 has a known problem when building GnuTLS -from source. Solutions and workarounds are documented -[here](#gnutls-test-suite-fail-status-request-revoked). - +Guix is available as a distribution package in [Debian +](https://packages.debian.org/search?keywords=guix) and [Ubuntu +](https://packages.ubuntu.com/search?keywords=guix). To install: ```sh sudo apt install guix ``` -For up-to-date information on Debian and Ubuntu's release history: -- [Debian release history](https://www.debian.org/releases/) -- [Ubuntu release history](https://ubuntu.com/about/release-cycle) - ### Arch Linux Guix is available in the AUR as @@ -167,80 +154,41 @@ For reference, the graphic below outlines Guix v1.3.0's dependency graph: ![bootstrap map](https://user-images.githubusercontent.com/6399679/125064185-a9a59880-e0b0-11eb-82c1-9b8e5dc9950d.png) -#### Consider /tmp on tmpfs - -If you use an NVME (SSD) drive, you may encounter [cryptic build errors](#coreutils-fail-teststail-2inotify-dir-recreate). Mounting a [tmpfs at /tmp](https://ubuntu.com/blog/data-driven-analysis-tmp-on-tmpfs) should prevent this and may improve performance as a bonus. - -#### Guile - -##### Choosing a Guile version and sticking to it - -One of the first things you need to decide is which Guile version you want to -use: Guile v2.2 or Guile v3.0. Unlike the python2 to python3 transition, Guile -v2.2 and Guile v3.0 are largely compatible, as evidenced by the fact that most -Guile packages and even [Guix -itself](https://guix.gnu.org/en/blog/2020/guile-3-and-guix/) support running on -both. 
- -What is important here is that you **choose one**, and you **remain consistent** -with your choice throughout **all Guile-related packages**, no matter if they -are installed via the distribution's package manager or installed from source. -This is because the files for Guile packages are installed to directories which -are separated based on the Guile version. - -###### Example: Checking that Ubuntu's `guile-git` is compatible with your chosen Guile version - -On Ubuntu Focal: +If you do not care about building each dependency from source, and Guix is +already packaged for your distribution, you can easily install only the build +dependencies of Guix. For example, to enable deb-src and install the Guix build +dependencies on Ubuntu/Debian: ```sh -$ apt show guile-git -Package: guile-git -... -Depends: guile-2.2, guile-bytestructures, libgit2-dev -... +sed -i 's|# deb-src|deb-src|g' /etc/apt/sources.list +apt update +apt-get build-dep -y guix ``` -As you can see, the package `guile-git` depends on `guile-2.2`, meaning that it -was likely built for Guile v2.2. This means that if you decided to use Guile -v3.0 on Ubuntu Focal, you would need to build guile-git from source instead of -using the distribution package. +If this succeeded, you can likely skip to section +["Building and Installing Guix itself"](#building-and-installing-guix-itself). -On Ubuntu Hirsute: - -```sh -$ apt show guile-git -Package: guile-git -... -Depends: guile-3.0 | guile-2.2, guile-bytestructures (>= 1.0.7-3~), libgit2-dev (>= 1.0) -... -``` - -In this case, `guile-git` depends on either `guile-3.0` or `guile-2.2`, meaning -that it would work no matter what Guile version you decided to use. +#### Guile ###### Corner case: Multiple versions of Guile on one system -It is recommended to only install one version of Guile, so that build systems do +It is recommended to only install the required version of Guile, so that build systems do not get confused about which Guile to use. -However, if you insist on having both Guile v2.2 and Guile v3.0 installed on -your system, then you need to **consistently** specify one of -`GUILE_EFFECTIVE_VERSION=3.0` or `GUILE_EFFECTIVE_VERSION=2.2` to all +However, if you insist on having more versions of Guile installed on +your system, then you need to **consistently** specify +`GUILE_EFFECTIVE_VERSION=3.0` to all `./configure` invocations for Guix and its dependencies. ##### Installing Guile -Guile is most likely already packaged for your distribution, so after you have -[chosen a Guile version](#choosing-a-guile-version-and-sticking-to-it), install -it via your distribution's package manager. - If your distribution splits packages into `-dev`-suffixed and non-`-dev`-suffixed sub-packages (as is the case for Debian-derived distributions), please make sure to install both. For example, to install Guile -v2.2 on Debian/Ubuntu: +v3.0 on Debian/Ubuntu: ```sh -apt install guile-2.2 guile-2.2-dev +apt install guile-3.0 guile-3.0-dev ``` #### Mixing distribution packages and source-built packages @@ -258,16 +206,16 @@ source-built packages, you will need to augment the `GUILE_LOAD_PATH` and `GUILE_LOAD_COMPILED_PATH` environment variables so that Guile will look under the right prefix and find your source-built packages. 
-For example, if you are using Guile v2.2, and have Guile packages in the +For example, if you are using Guile v3.0, and have Guile packages in the `/usr/local` prefix, either add the following lines to your `.profile` or `.bash_profile` so that the environment variable is properly set for all future shell logins, or paste the lines into a POSIX-style shell to temporarily modify the environment variables of your current shell session. ```sh -# Help Guile v2.2.x find packages in /usr/local -export GUILE_LOAD_PATH="/usr/local/share/guile/site/2.2${GUILE_LOAD_PATH:+:}$GUILE_LOAD_PATH" -export GUILE_LOAD_COMPILED_PATH="/usr/local/lib/guile/2.2/site-ccache${GUILE_LOAD_COMPILED_PATH:+:}$GUILE_COMPILED_LOAD_PATH" +# Help Guile v3.0.x find packages in /usr/local +export GUILE_LOAD_PATH="/usr/local/share/guile/site/3.0${GUILE_LOAD_PATH:+:}$GUILE_LOAD_PATH" +export GUILE_LOAD_COMPILED_PATH="/usr/local/lib/guile/3.0/site-ccache${GUILE_LOAD_COMPILED_PATH:+:}$GUILE_COMPILED_LOAD_PATH" ``` Note that these environment variables are used to check for packages during @@ -352,7 +300,7 @@ Relevant for: - Those installing `guile-git` from their distribution where `guile-git` is built against `libgit2 < 1.1` -As of v0.4.0, `guile-git` claims to only require `libgit2 >= 0.28.0`, however, +As of v0.5.2, `guile-git` claims to only require `libgit2 >= 0.28.0`, however, it actually requires `libgit2 >= 1.1`, otherwise, it will be confused by a reference of `origin/keyring`: instead of interpreting the reference as "the 'keyring' branch of the 'origin' remote", the reference is interpreted as "the @@ -366,20 +314,6 @@ Should you be in this situation, you need to build both `libgit2 v1.1.x` and Source: https://logs.guix.gnu.org/guix/2020-11-12.log#232527 -##### `{scheme,guile}-bytestructures` v1.0.8 and v1.0.9 are broken for Guile v2.2 - -Relevant for: -- Those building `{scheme,guile}-bytestructures` from source against Guile v2.2 - -Commit -[707eea3](https://github.com/TaylanUB/scheme-bytestructures/commit/707eea3a85e1e375e86702229ebf73d496377669) -introduced a regression for Guile v2.2 and was first included in v1.0.8, this -was later corrected in commit -[ec9a721](https://github.com/TaylanUB/scheme-bytestructures/commit/ec9a721957c17bcda13148f8faa5f06934431ff7) -and included in v1.1.0. - -TL;DR If you decided to use Guile v2.2, do not use `{scheme,guile}-bytestructures` v1.0.8 or v1.0.9. - ### Building and Installing Guix itself Start by cloning Guix: @@ -389,10 +323,8 @@ git clone https://git.savannah.gnu.org/git/guix.git cd guix ``` -You will likely want to build the latest release, however, if the latest release -when you're reading this is still 1.3.0 then you may want to use 998eda30 instead -to avoid the issues described in [#25099]( -https://github.com/bitcoin/bitcoin/pull/25099). +You will likely want to build the latest release. +At the time of writing (November 2023), the latest release was `v1.4.0`. ``` git branch -a -l 'origin/version-*' # check for the latest release @@ -578,7 +510,7 @@ sudo --login guix pull --commit= ``` `guix pull` is quite a long process (especially if you're using -`--no-substitute`). If you encounter build problems, please refer to the +`--no-substitutes`). If you encounter build problems, please refer to the [troubleshooting section](#troubleshooting). 
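+After the pull finishes, one way to confirm which Guix commit your user now
+runs (purely an optional sanity check) is:
+
+```sh
+guix describe --format=channels
+```
+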
Note that running a bare `guix pull` with no commit or branch specified will @@ -616,7 +548,7 @@ systemctl enable guix-daemon systemctl start guix-daemon ``` -Remember to set `--no-substitute` in `$libdir/systemd/system/guix-daemon.service` and other customizations if you used them for `guix-daemon-original.service`. +Remember to set `--no-substitutes` in `$libdir/systemd/system/guix-daemon.service` and other customizations if you used them for `guix-daemon-original.service`. ##### If you installed Guix via the Debian/Ubuntu distribution packages @@ -726,26 +658,18 @@ $ bzcat /var/log/guix/drvs/../...-foo-3.6.12.drv.bz2 | less times, it may be `/tmp/...drv-1` or `/tmp/...drv-2`. Always consult the build failure output for the most accurate, up-to-date information. -### openssl-1.1.1l and openssl-1.1.1n - -OpenSSL includes tests that will fail once some certificate has expired. A workaround -is to change your system clock: - -```sh -sudo timedatectl set-ntp no -sudo date --set "28 may 2022 15:00:00" -sudo --login guix build --cores=1 /gnu/store/g9alz81w4q03ncm542487xd001s6akd4-openssl-1.1.1l.drv -sudo --login guix build --cores=1 /gnu/store/mw6ax0gk33gh082anrdrxp2flrbskxv6-openssl-1.1.1n.drv -sudo timedatectl set-ntp yes -``` - ### python(-minimal): [Errno 84] Invalid or incomplete multibyte or wide character This error occurs when your `$TMPDIR` (default: /tmp) exists on a filesystem which rejects characters not present in the UTF-8 character code set. An example is ZFS with the utf8only=on option set. -More information: https://bugs.python.org/issue37584 +More information: https://github.com/python/cpython/issues/81765 + +### openssl-1.1.1l and openssl-1.1.1n + +OpenSSL includes tests that will fail once some certificate has expired. +The workarounds from the GnuTLS section immediately below can be used. ### GnuTLS: test-suite FAIL: status-request-revoked @@ -781,13 +705,41 @@ authorized. This workaround was described [here](https://issues.guix.gnu.org/44559#5). Basically: -1. Turn off networking 2. Turn off NTP 3. Set system time to 2020-10-01 4. guix build --no-substitutes /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv 5. Set system time back to accurate current time 6. Turn NTP back on -7. Turn networking back on + +For example, + +```sh +sudo timedatectl set-ntp no +sudo date --set "01 oct 2020 15:00:00" +guix build /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv +sudo timedatectl set-ntp yes +``` + +#### Workaround 3: Disable the tests in the Guix source code for this single derivation + +If all of the above workarounds fail, you can also disable the `tests` phase of +the derivation via the `arguments` option, as described in the official +[`package` +reference](https://guix.gnu.org/manual/en/html_node/package-Reference.html). + +For example, to disable the openssl-1.1 check phase: + +```diff +diff --git a/gnu/packages/tls.scm b/gnu/packages/tls.scm +index f1e844b..1077c4b 100644 +--- a/gnu/packages/tls.scm ++++ b/gnu/packages/tls.scm +@@ -494,4 +494,5 @@ (define-public openssl-1.1 + (arguments + `(#:parallel-tests? #f ++ #:tests? #f + #:test-target "test" +``` ### coreutils: FAIL: tests/tail-2/inotify-dir-recreate @@ -796,7 +748,7 @@ The inotify-dir-create test fails on "remote" filesystems such as overlayfs as non-remote. A relatively easy workaround to this is to make sure that a somewhat traditional -filesystem is mounted at `/tmp` (where `guix-daemon` performs its builds), see [/tmp on tmpfs](#consider-tmp-on-tmpfs). 
For +filesystem is mounted at `/tmp` (where `guix-daemon` performs its builds). For Docker users, this might mean [using a volume][docker/volumes], [binding mounting][docker/bind-mnt] from host, or (for those with enough RAM and swap) [mounting a tmpfs][docker/tmpfs] using the `--tmpfs` flag. diff --git a/contrib/guix/README.md b/contrib/guix/README.md index c0feb486ff2a3..6fb647f8a9d13 100644 --- a/contrib/guix/README.md +++ b/contrib/guix/README.md @@ -11,7 +11,7 @@ We achieve bootstrappability by using Guix as a functional package manager. # Requirements -Conservatively, you will need an x86_64 machine with: +Conservatively, you will need: - 16GB of free disk space on the partition that /gnu/store will reside in - 8GB of free disk space **per platform triple** you're planning on building @@ -259,7 +259,7 @@ details. Override the number of jobs to run simultaneously, you might want to do so on a memory-limited machine. This may be passed to: - - `guix` build commands as in `guix environment --cores="$JOBS"` + - `guix` build commands as in `guix shell --cores="$JOBS"` - `make` as in `make --jobs="$JOBS"` - `xargs` as in `xargs -P"$JOBS"` @@ -301,7 +301,7 @@ details. * _**ADDITIONAL_GUIX_ENVIRONMENT_FLAGS**_ - Additional flags to be passed to the invocation of `guix environment` inside + Additional flags to be passed to the invocation of `guix shell` inside `guix time-machine`. # Choosing your security model diff --git a/contrib/guix/guix-build b/contrib/guix/guix-build index 74b24b9612072..298e7bfbd689f 100755 --- a/contrib/guix/guix-build +++ b/contrib/guix/guix-build @@ -365,7 +365,7 @@ EOF # Run the build script 'contrib/guix/libexec/build.sh' in the build # container specified by 'contrib/guix/manifest.scm'. # - # Explanation of `guix environment` flags: + # Explanation of `guix shell` flags: # # --container run command within an isolated container # @@ -428,7 +428,7 @@ EOF # more information. # # shellcheck disable=SC2086,SC2031 - time-machine environment --manifest="${PWD}/contrib/guix/manifest.scm" \ + time-machine shell --manifest="${PWD}/contrib/guix/manifest.scm" \ --container \ --pure \ --no-cwd \ diff --git a/contrib/guix/guix-codesign b/contrib/guix/guix-codesign index 3279d431aaf34..4694209e00d5b 100755 --- a/contrib/guix/guix-codesign +++ b/contrib/guix/guix-codesign @@ -286,7 +286,7 @@ EOF # Run the build script 'contrib/guix/libexec/build.sh' in the build # container specified by 'contrib/guix/manifest.scm'. # - # Explanation of `guix environment` flags: + # Explanation of `guix shell` flags: # # --container run command within an isolated container # @@ -343,7 +343,7 @@ EOF # more information. # # shellcheck disable=SC2086,SC2031 - time-machine environment --manifest="${PWD}/contrib/guix/manifest.scm" \ + time-machine shell --manifest="${PWD}/contrib/guix/manifest.scm" \ --container \ --pure \ --no-cwd \ diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh index e0bd15493f884..b301369ad92eb 100755 --- a/contrib/guix/libexec/build.sh +++ b/contrib/guix/libexec/build.sh @@ -8,7 +8,7 @@ export TZ=UTC # Although Guix _does_ set umask when building its own packages (in our case, # this is all packages in manifest.scm), it does not set it for `guix -# environment`. It does make sense for at least `guix environment --container` +# shell`. 
It does make sense for at least `guix shell --container` # to set umask, so if that change gets merged upstream and we bump the # time-machine to a commit which includes the aforementioned change, we can # remove this line. @@ -52,7 +52,8 @@ BASEPREFIX="${PWD}/depends" store_path() { grep --extended-regexp "/[^-]{32}-${1}-[^-]+${2:+-${2}}" "${GUIX_ENVIRONMENT}/manifest" \ | head --lines=1 \ - | sed --expression='s|^[[:space:]]*"||' \ + | sed --expression='s|\x29*$||' \ + --expression='s|^[[:space:]]*"||' \ --expression='s|"[[:space:]]*$||' } @@ -314,7 +315,7 @@ mkdir -p "$DISTSRC" | gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}-unsigned.tar.gz" \ || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}-unsigned.tar.gz" && exit 1 ) ) - make deploy ${V:+V=1} OSX_DMG="${OUTDIR}/${DISTNAME}-${HOST}-unsigned.dmg" + make deploy ${V:+V=1} OSX_ZIP="${OUTDIR}/${DISTNAME}-${HOST}-unsigned.zip" ;; esac ( diff --git a/contrib/guix/libexec/codesign.sh b/contrib/guix/libexec/codesign.sh index 80277046f45f2..a7f4020270069 100755 --- a/contrib/guix/libexec/codesign.sh +++ b/contrib/guix/libexec/codesign.sh @@ -8,7 +8,7 @@ export TZ=UTC # Although Guix _does_ set umask when building its own packages (in our case, # this is all packages in manifest.scm), it does not set it for `guix -# environment`. It does make sense for at least `guix environment --container` +# shell`. It does make sense for at least `guix shell --container` # to set umask, so if that change gets merged upstream and we bump the # time-machine to a commit which includes the aforementioned change, we can # remove this line. @@ -85,11 +85,12 @@ mkdir -p "$DISTSRC" # Apply detached codesignatures to dist/ (in-place) signapple apply dist/Navcoin-Qt.app codesignatures/osx/dist - # Make a DMG from dist/ - xorrisofs -D -l -V "$(< osx_volname)" -no-pad -r -dir-mode 0755 \ - -o "${OUTDIR}/${DISTNAME}-${HOST}.dmg" \ - dist \ - -- -volume_date all_file_dates ="$SOURCE_DATE_EPOCH" + # Make a .zip from dist/ + cd dist/ + find . -print0 \ + | xargs -0r touch --no-dereference --date="@${SOURCE_DATE_EPOCH}" + find . 
| sort \ + | zip -X@ "${OUTDIR}/${DISTNAME}-${HOST}.zip" ;; *) exit 1 diff --git a/contrib/guix/libexec/prelude.bash b/contrib/guix/libexec/prelude.bash index 3eb8fc02dae65..6c912ca748d7e 100644 --- a/contrib/guix/libexec/prelude.bash +++ b/contrib/guix/libexec/prelude.bash @@ -51,7 +51,7 @@ fi time-machine() { # shellcheck disable=SC2086 guix time-machine --url=https://git.savannah.gnu.org/git/guix.git \ - --commit=998eda3067c7d21e0d9bb3310d2f5a14b8f1c681 \ + --commit=d5ca4d4fd713a9f7e17e074a1e37dda99bbb09fc \ --cores="$JOBS" \ --keep-failed \ --fallback \ diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index d83ff08713d50..7335596107e62 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -1,44 +1,35 @@ -(use-modules (gnu) - (gnu packages) +(use-modules (gnu packages) (gnu packages autotools) - (gnu packages base) - (gnu packages bash) + ((gnu packages bash) #:select (bash-minimal)) (gnu packages bison) - (gnu packages certs) - (gnu packages cdrom) - (gnu packages check) - (gnu packages cmake) + ((gnu packages certs) #:select (nss-certs)) + ((gnu packages cmake) #:select (cmake-minimal)) (gnu packages commencement) (gnu packages compression) (gnu packages cross-base) - (gnu packages curl) (gnu packages file) (gnu packages gawk) (gnu packages gcc) - (gnu packages gnome) - (gnu packages installers) - (gnu packages linux) + ((gnu packages installers) #:select (nsis-x86_64)) + ((gnu packages linux) #:select (linux-libre-headers-6.1 util-linux)) (gnu packages llvm) (gnu packages mingw) (gnu packages moreutils) (gnu packages pkg-config) - (gnu packages python) - (gnu packages python-crypto) - (gnu packages python-web) - (gnu packages shells) - (gnu packages tls) - (gnu packages version-control) + ((gnu packages python) #:select (python-minimal)) + ((gnu packages python-build) #:select (python-tomli)) + ((gnu packages python-crypto) #:select (python-asn1crypto)) + ((gnu packages tls) #:select (openssl)) + ((gnu packages version-control) #:select (git-minimal)) (guix build-system cmake) (guix build-system gnu) (guix build-system python) (guix build-system trivial) - (guix download) (guix gexp) (guix git-download) ((guix licenses) #:prefix license:) (guix packages) - (guix profiles) - (guix utils)) + ((guix utils) #:select (substitute-keyword-arguments))) (define-syntax-rule (search-our-patches file-name ...) "Return the list of absolute file names corresponding to each @@ -47,41 +38,7 @@ FILE-NAME found in ./patches relative to the current file." ((%patch-path (list (string-append (dirname (current-filename)) "/patches")))) (list (search-patch file-name) ...))) -(define (make-ssp-fixed-gcc xgcc) - "Given a XGCC package, return a modified package that uses the SSP function -from glibc instead of from libssp.so. Our `symbol-check' script will complain if -we link against libssp.so, and thus will ensure that this works properly. 
- -Taken from: -http://www.linuxfromscratch.org/hlfs/view/development/chapter05/gcc-pass1.html" - (package - (inherit xgcc) - (arguments - (substitute-keyword-arguments (package-arguments xgcc) - ((#:make-flags flags) - `(cons "gcc_cv_libc_provides_ssp=yes" ,flags)))))) - -(define (make-gcc-rpath-link xgcc) - "Given a XGCC package, return a modified package that replace each instance of --rpath in the default system spec that's inserted by Guix with -rpath-link" - (package - (inherit xgcc) - (arguments - (substitute-keyword-arguments (package-arguments xgcc) - ((#:phases phases) - `(modify-phases ,phases - (add-after 'pre-configure 'replace-rpath-with-rpath-link - (lambda _ - (substitute* (cons "gcc/config/rs6000/sysv4.h" - (find-files "gcc/config" - "^gnu-user.*\\.h$")) - (("-rpath=") "-rpath-link=")) - #t)))))))) - -(define building-on (string-append (list-ref (string-split (%current-system) #\-) 0) "-guix-linux-gnu")) - -(define (explicit-cross-configure package) - (package-with-extra-configure-variable package "--build" building-on)) +(define building-on (string-append "--build=" (list-ref (string-split (%current-system) #\-) 0) "-guix-linux-gnu")) (define (make-cross-toolchain target base-gcc-for-libc @@ -92,28 +49,28 @@ http://www.linuxfromscratch.org/hlfs/view/development/chapter05/gcc-pass1.html" (let* ((xbinutils (cross-binutils target)) ;; 1. Build a cross-compiling gcc without targeting any libc, derived ;; from BASE-GCC-FOR-LIBC - (xgcc-sans-libc (explicit-cross-configure (cross-gcc target - #:xgcc base-gcc-for-libc - #:xbinutils xbinutils))) + (xgcc-sans-libc (cross-gcc target + #:xgcc base-gcc-for-libc + #:xbinutils xbinutils)) ;; 2. Build cross-compiled kernel headers with XGCC-SANS-LIBC, derived ;; from BASE-KERNEL-HEADERS (xkernel (cross-kernel-headers target - base-kernel-headers - xgcc-sans-libc - xbinutils)) + #:linux-headers base-kernel-headers + #:xgcc xgcc-sans-libc + #:xbinutils xbinutils)) ;; 3. Build a cross-compiled libc with XGCC-SANS-LIBC and XKERNEL, ;; derived from BASE-LIBC - (xlibc (explicit-cross-configure (cross-libc target - base-libc - xgcc-sans-libc - xbinutils - xkernel))) + (xlibc (cross-libc target + #:libc base-libc + #:xgcc xgcc-sans-libc + #:xbinutils xbinutils + #:xheaders xkernel)) ;; 4. 
Build a cross-compiling gcc targeting XLIBC, derived from ;; BASE-GCC - (xgcc (explicit-cross-configure (cross-gcc target - #:xgcc base-gcc - #:xbinutils xbinutils - #:libc xlibc)))) + (xgcc (cross-gcc target + #:xgcc base-gcc + #:xbinutils xbinutils + #:libc xlibc))) ;; Define a meta-package that propagates the resulting XBINUTILS, XLIBC, and ;; XGCC (package @@ -123,11 +80,11 @@ http://www.linuxfromscratch.org/hlfs/view/development/chapter05/gcc-pass1.html" (build-system trivial-build-system) (arguments '(#:builder (begin (mkdir %output) #t))) (propagated-inputs - `(("binutils" ,xbinutils) - ("libc" ,xlibc) - ("libc:static" ,xlibc "static") - ("gcc" ,xgcc) - ("gcc-lib" ,xgcc "lib"))) + (list xbinutils + xlibc + xgcc + `(,xlibc "static") + `(,xgcc "lib"))) (synopsis (string-append "Complete GCC tool chain for " target)) (description (string-append "This package provides a complete GCC tool chain for " target " development.")) @@ -135,23 +92,14 @@ chain for " target " development.")) (license (package-license xgcc))))) (define base-gcc gcc-10) -(define base-linux-kernel-headers linux-libre-headers-5.15) - -;; https://gcc.gnu.org/install/configure.html -(define (hardened-gcc gcc) - (package-with-extra-configure-variable ( - package-with-extra-configure-variable ( - package-with-extra-configure-variable gcc - "--enable-initfini-array" "yes") - "--enable-default-ssp" "yes") - "--enable-default-pie" "yes")) +(define base-linux-kernel-headers linux-libre-headers-6.1) (define* (make-bitcoin-cross-toolchain target #:key - (base-gcc-for-libc base-gcc) + (base-gcc-for-libc linux-base-gcc) (base-kernel-headers base-linux-kernel-headers) - (base-libc (hardened-glibc glibc-2.27)) - (base-gcc (make-gcc-rpath-link (hardened-gcc base-gcc)))) + (base-libc glibc-2.27) + (base-gcc linux-base-gcc)) "Convenience wrapper around MAKE-CROSS-TOOLCHAIN with default values desirable for building Bitcoin Core release binaries." (make-cross-toolchain target @@ -160,26 +108,19 @@ desirable for building Bitcoin Core release binaries." base-libc base-gcc)) -(define (make-gcc-with-pthreads gcc) - (package-with-extra-configure-variable - (package-with-extra-patches gcc - (search-our-patches "gcc-10-remap-guix-store.patch")) - "--enable-threads" "posix")) - -(define (make-mingw-w64-cross-gcc cross-gcc) - (package-with-extra-patches cross-gcc - (search-our-patches "vmov-alignment.patch" - "gcc-broken-longjmp.patch"))) +(define (gcc-mingw-patches gcc) + (package-with-extra-patches gcc + (search-our-patches "gcc-remap-guix-store.patch" + "vmov-alignment.patch"))) (define (make-mingw-pthreads-cross-toolchain target) "Create a cross-compilation toolchain package for TARGET" (let* ((xbinutils (cross-binutils target)) (pthreads-xlibc mingw-w64-x86_64-winpthreads) - (pthreads-xgcc (make-gcc-with-pthreads - (cross-gcc target - #:xgcc (make-ssp-fixed-gcc (make-mingw-w64-cross-gcc base-gcc)) + (pthreads-xgcc (cross-gcc target + #:xgcc (gcc-mingw-patches mingw-w64-base-gcc) #:xbinutils xbinutils - #:libc pthreads-xlibc)))) + #:libc pthreads-xlibc))) ;; Define a meta-package that propagates the resulting XBINUTILS, XLIBC, and ;; XGCC (package @@ -189,53 +130,54 @@ desirable for building Bitcoin Core release binaries." 
(build-system trivial-build-system) (arguments '(#:builder (begin (mkdir %output) #t))) (propagated-inputs - `(("binutils" ,xbinutils) - ("libc" ,pthreads-xlibc) - ("gcc" ,pthreads-xgcc) - ("gcc-lib" ,pthreads-xgcc "lib"))) + (list xbinutils + pthreads-xlibc + pthreads-xgcc + `(,pthreads-xgcc "lib"))) (synopsis (string-append "Complete GCC tool chain for " target)) (description (string-append "This package provides a complete GCC tool chain for " target " development.")) (home-page (package-home-page pthreads-xgcc)) (license (package-license pthreads-xgcc))))) -(define (make-nsis-for-gcc-10 base-nsis) - (package-with-extra-patches base-nsis - (search-our-patches "nsis-gcc-10-memmove.patch" - "nsis-disable-installer-reloc.patch"))) - -(define (fix-ppc64-nx-default lief) - (package-with-extra-patches lief - (search-our-patches "lief-fix-ppc64-nx-default.patch"))) - -;; Our python-lief package can be removed once we are using -;; guix 83bfdb409787cb2737e68b093a319b247b7858e6 or later. -;; Note we currently use cmake-minimal. +;; While LIEF is packaged in Guix, we maintain our own package, +;; to simplify building, and more easily apply updates. +;; Moreover, the Guix's package uses cmake, which caused build +;; failure; see https://github.com/bitcoin/bitcoin/pull/27296. (define-public python-lief (package (name "python-lief") - (version "0.12.3") + (version "0.13.2") (source (origin (method git-fetch) (uri (git-reference (url "https://github.com/lief-project/LIEF") (commit version))) (file-name (git-file-name name version)) + (modules '((guix build utils))) + (snippet + '(begin + ;; Configure build for Python bindings. + (substitute* "api/python/config-default.toml" + (("(ninja = )true" all m) + (string-append m "false")) + (("(parallel-jobs = )0" all m) + (string-append m (number->string (parallel-job-count))))))) (sha256 (base32 - "11i6hqmcjh56y554kqhl61698n9v66j2qk1c1g63mv2w07h2z661")))) + "0y48x358ppig5xp97ahcphfipx7cg9chldj2q5zrmn610fmi4zll")))) (build-system python-build-system) - (native-inputs (list cmake-minimal)) + (native-inputs (list cmake-minimal python-tomli)) (arguments (list #:tests? 
#f ;needs network #:phases #~(modify-phases %standard-phases + (add-before 'build 'change-directory + (lambda _ + (chdir "api/python"))) (replace 'build (lambda _ - (invoke - "python" "setup.py" "--sdk" "build" - (string-append - "-j" (number->string (parallel-job-count))))))))) + (invoke "python" "setup.py" "build")))))) (home-page "https://github.com/lief-project/LIEF") (synopsis "Library to instrument executable formats") (description @@ -248,18 +190,15 @@ and abstract ELF, PE and MachO formats.") (name "osslsigncode") (version "2.5") (source (origin - (method url-fetch) - (uri (string-append "https://github.com/mtrojnar/" - name "/archive/" version ".tar.gz")) + (method git-fetch) + (uri (git-reference + (url "https://github.com/mtrojnar/osslsigncode") + (commit version))) (sha256 (base32 - "03by9706gg0an6dn48pljx38vcb76ziv11bgm8ilwsf293x2k4hv")))) + "1j47vwq4caxfv0xw68kw5yh00qcpbd56d7rq6c483ma3y7s96yyz")))) (build-system cmake-build-system) - (inputs - `(("openssl", openssl))) - (arguments - '(#:configure-flags - (list "-DCMAKE_DISABLE_FIND_PACKAGE_CURL=TRUE"))) + (inputs (list openssl)) (home-page "https://github.com/mtrojnar/osslsigncode") (synopsis "Authenticode signing and timestamping tool") (description "osslsigncode is a small tool that implements part of the @@ -295,7 +234,7 @@ thus should be able to compile on most platforms where these exist.") (define-public python-oscrypto (package (name "python-oscrypto") - (version "1.2.1") + (version "1.3.0") (source (origin (method git-fetch) @@ -305,7 +244,7 @@ thus should be able to compile on most platforms where these exist.") (file-name (git-file-name name version)) (sha256 (base32 - "1d4d8s4z340qhvb3g5m5v3436y3a71yc26wk4749q64m09kxqc3l")) + "1v5wkmzcyiqy39db8j2dvkdrv2nlsc48556h73x4dzjwd6kg4q0a")) (patches (search-our-patches "oscrypto-hard-code-openssl.patch")))) (build-system python-build-system) (native-search-paths @@ -316,8 +255,7 @@ thus should be able to compile on most platforms where these exist.") (files '("etc/ssl/certs/ca-certificates.crt"))))) (propagated-inputs - `(("python-asn1crypto" ,python-asn1crypto) - ("openssl" ,openssl))) + (list python-asn1crypto openssl)) (arguments `(#:phases (modify-phases %standard-phases @@ -355,7 +293,7 @@ thus should be able to compile on most platforms where these exist.") (package (inherit python-oscrypto) (name "python-oscryptotests") (propagated-inputs - `(("python-oscrypto" ,python-oscrypto))) + (list python-oscrypto)) (arguments `(#:tests? #f #:phases @@ -382,9 +320,9 @@ thus should be able to compile on most platforms where these exist.") "1qw2k7xis53179lpqdqyylbcmp76lj7sagp883wmxg5i7chhc96k")))) (build-system python-build-system) (propagated-inputs - `(("python-asn1crypto" ,python-asn1crypto) - ("python-oscrypto" ,python-oscrypto) - ("python-oscryptotests", python-oscryptotests))) ;; certvalidator tests import oscryptotests + (list python-asn1crypto + python-oscrypto + python-oscryptotests)) ;; certvalidator tests import oscryptotests (arguments `(#:phases (modify-phases %standard-phases @@ -432,79 +370,8 @@ certificates or paths. 
Supports various options, including: validation at a specific moment in time, whitelisting and revocation checks.") (license license:expat)))) -(define-public python-altgraph - (package - (name "python-altgraph") - (version "0.17") - (source - (origin - (method git-fetch) - (uri (git-reference - (url "https://github.com/ronaldoussoren/altgraph") - (commit (string-append "v" version)))) - (file-name (git-file-name name version)) - (sha256 - (base32 - "09sm4srvvkw458pn48ga9q7ykr4xlz7q8gh1h9w7nxpf001qgpwb")))) - (build-system python-build-system) - (home-page "https://github.com/ronaldoussoren/altgraph") - (synopsis "Python graph (network) package") - (description "altgraph is a fork of graphlib: a graph (network) package for -constructing graphs, BFS and DFS traversals, topological sort, shortest paths, -etc. with graphviz output.") - (license license:expat))) - - -(define-public python-macholib - (package - (name "python-macholib") - (version "1.14") - (source - (origin - (method git-fetch) - (uri (git-reference - (url "https://github.com/ronaldoussoren/macholib") - (commit (string-append "v" version)))) - (file-name (git-file-name name version)) - (sha256 - (base32 - "0aislnnfsza9wl4f0vp45ivzlc0pzhp9d4r08700slrypn5flg42")))) - (build-system python-build-system) - (propagated-inputs - `(("python-altgraph" ,python-altgraph))) - (arguments - '(#:phases - (modify-phases %standard-phases - (add-after 'unpack 'disable-broken-tests - (lambda _ - ;; This test is broken as there is no keyboard interrupt. - (substitute* "macholib_tests/test_command_line.py" - (("^(.*)class TestCmdLine" line indent) - (string-append indent - "@unittest.skip(\"Disabled by Guix\")\n" - line))) - (substitute* "macholib_tests/test_dyld.py" - (("^(.*)def test_\\S+_find" line indent) - (string-append indent - "@unittest.skip(\"Disabled by Guix\")\n" - line)) - (("^(.*)def testBasic" line indent) - (string-append indent - "@unittest.skip(\"Disabled by Guix\")\n" - line)) - ) - #t))))) - (home-page "https://github.com/ronaldoussoren/macholib") - (synopsis "Python library for analyzing and editing Mach-O headers") - (description "macholib is a Macho-O header analyzer and editor. It's -typically used as a dependency analysis tool, and also to rewrite dylib -references in Mach-O headers to be @executable_path relative. Though this tool -targets a platform specific file format, it is pure python code that is platform -and endian independent.") - (license license:expat))) - (define-public python-signapple - (let ((commit "8a945a2e7583be2665cf3a6a89d665b70ecd1ab6")) + (let ((commit "62155712e7417aba07565c9780a80e452823ae6a")) (package (name "python-signapple") (version (git-version "0.1" "1" commit)) @@ -517,15 +384,13 @@ and endian independent.") (file-name (git-file-name name commit)) (sha256 (base32 - "0fr1hangvfyiwflca6jg5g8zvg3jc9qr7vd2c12ff89pznf38dlg")))) + "1nm6rm4h4m7kbq729si4cm8rzild62mk4ni8xr5zja7l33fhv3gb")))) (build-system python-build-system) (propagated-inputs - `(("python-asn1crypto" ,python-asn1crypto) - ("python-oscrypto" ,python-oscrypto) - ("python-certvalidator" ,python-certvalidator) - ("python-elfesteem" ,python-elfesteem) - ("python-requests" ,python-requests) - ("python-macholib" ,python-macholib))) + (list python-asn1crypto + python-oscrypto + python-certvalidator + python-elfesteem)) ;; There are no tests, but attempting to run python setup.py test leads to ;; problems, just disable the test (arguments '(#:tests? 
#f)) @@ -535,16 +400,41 @@ and endian independent.") inspecting signatures in Mach-O binaries.") (license license:expat)))) -;; https://www.gnu.org/software/libc/manual/html_node/Configuring-and-compiling.html -;; We don't use --disable-werror directly, as that would be passed through to bash, -;; and cause it's build to fail. -(define (hardened-glibc glibc) - (package-with-extra-configure-variable ( - package-with-extra-configure-variable ( - package-with-extra-configure-variable glibc - "enable_werror" "no") - "--enable-stack-protector" "all") - "--enable-bind-now" "yes")) +(define-public mingw-w64-base-gcc + (package + (inherit base-gcc) + (arguments + (substitute-keyword-arguments (package-arguments base-gcc) + ((#:configure-flags flags) + `(append ,flags + ;; https://gcc.gnu.org/install/configure.html + (list "--enable-threads=posix", + "--enable-default-ssp=yes", + building-on))))))) + +(define-public linux-base-gcc + (package + (inherit base-gcc) + (arguments + (substitute-keyword-arguments (package-arguments base-gcc) + ((#:configure-flags flags) + `(append ,flags + ;; https://gcc.gnu.org/install/configure.html + (list "--enable-initfini-array=yes", + "--enable-default-ssp=yes", + "--enable-default-pie=yes", + building-on))) + ((#:phases phases) + `(modify-phases ,phases + ;; Given a XGCC package, return a modified package that replace each instance of + ;; -rpath in the default system spec that's inserted by Guix with -rpath-link + (add-after 'pre-configure 'replace-rpath-with-rpath-link + (lambda _ + (substitute* (cons "gcc/config/rs6000/sysv4.h" + (find-files "gcc/config" + "^gnu-user.*\\.h$")) + (("-rpath=") "-rpath-link=")) + #t)))))))) (define-public glibc-2.27 (package @@ -559,11 +449,32 @@ inspecting signatures in Mach-O binaries.") (sha256 (base32 "0azpb9cvnbv25zg8019rqz48h8i2257ngyjg566dlnp74ivrs9vq")) - (patches (search-our-patches "glibc-ldd-x86_64.patch" - "glibc-versioned-locpath.patch" - "glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch" + (patches (search-our-patches "glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch" "glibc-2.27-fcommon.patch" - "glibc-2.27-guix-prefix.patch")))))) + "glibc-2.27-guix-prefix.patch" + "glibc-2.27-no-librt.patch" + "glibc-2.27-powerpc-ldbrx.patch")))) + (arguments + (substitute-keyword-arguments (package-arguments glibc) + ((#:configure-flags flags) + `(append ,flags + ;; https://www.gnu.org/software/libc/manual/html_node/Configuring-and-compiling.html + (list "--enable-stack-protector=all", + "--enable-bind-now", + "--disable-werror", + building-on))) + ((#:phases phases) + `(modify-phases ,phases + (add-before 'configure 'set-etc-rpc-installation-directory + (lambda* (#:key outputs #:allow-other-keys) + ;; Install the rpc data base file under `$out/etc/rpc'. + ;; Otherwise build will fail with "Permission denied." 
+ (let ((out (assoc-ref outputs "out"))) + (substitute* "sunrpc/Makefile" + (("^\\$\\(inst_sysconfdir\\)/rpc(.*)$" _ suffix) + (string-append out "/etc/rpc" suffix "\n")) + (("^install-others =.*$") + (string-append "install-others = " out "/etc/rpc\n")))))))))))) (packages->manifest (append @@ -589,7 +500,7 @@ inspecting signatures in Mach-O binaries.") xz ;; Build tools gnu-make - libtool-2.4.7 + libtool autoconf-2.71 automake pkg-config @@ -598,21 +509,21 @@ inspecting signatures in Mach-O binaries.") gcc-toolchain-10 (list gcc-toolchain-10 "static") ;; Scripting - python-minimal ;; (3.9) + python-minimal ;; (3.10) ;; Git git-minimal ;; Tests - (fix-ppc64-nx-default python-lief)) + python-lief) (let ((target (getenv "HOST"))) (cond ((string-suffix? "-mingw32" target) ;; Windows (list zip (make-mingw-pthreads-cross-toolchain "x86_64-w64-mingw32") - (make-nsis-for-gcc-10 nsis-x86_64) + nsis-x86_64 nss-certs osslsigncode)) ((string-contains target "-linux-") (list (make-bitcoin-cross-toolchain target))) ((string-contains target "darwin") - (list clang-toolchain-10 binutils cmake-minimal xorriso python-signapple)) + (list clang-toolchain-17 binutils cmake-minimal python-signapple zip)) (else '()))))) diff --git a/contrib/guix/patches/gcc-broken-longjmp.patch b/contrib/guix/patches/gcc-broken-longjmp.patch deleted file mode 100644 index 1cfc0918b090a..0000000000000 --- a/contrib/guix/patches/gcc-broken-longjmp.patch +++ /dev/null @@ -1,68 +0,0 @@ -commit eb5698897c52702498938592d7f76e67d126451f -Author: Eric Botcazou -Date: Wed May 5 22:48:51 2021 +0200 - - Fix PR target/100402 - - This is a regression for 64-bit Windows present from mainline down to the 9 - branch and introduced by the fix for PR target/99234. Again SEH, but with - a twist related to the way MinGW implements setjmp/longjmp, which turns out - to be piggybacked on SEH with recent versions of MinGW, i.e. the longjmp - performs a bona-fide unwinding of the stack, because it calls RtlUnwindEx - with the second argument initially passed to setjmp, which is the result of - __builtin_frame_address (0) in the MinGW header file: - - define setjmp(BUF) _setjmp((BUF), __builtin_frame_address (0)) - - This means that we directly expose the frame pointer to the SEH machinery - here (unlike with regular exception handling where we use an intermediate - CFA) and thus that we cannot do whatever we want with it. The old code - would leave it unaligned, i.e. not multiple of 16, whereas the new code - aligns it, but this breaks for some reason; at least it appears that a - .seh_setframe directive with 0 as second argument always works, so the - fix aligns it this way. - - gcc/ - PR target/100402 - * config/i386/i386.c (ix86_compute_frame_layout): For a SEH target, - always return the establisher frame for __builtin_frame_address (0). - gcc/testsuite/ - * gcc.c-torture/execute/20210505-1.c: New test. - -diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c -index 2f838840e96..06ad1b2274e 100644 ---- a/gcc/config/i386/i386.c -+++ b/gcc/config/i386/i386.c -@@ -6356,12 +6356,29 @@ ix86_compute_frame_layout (void) - area, see the SEH code in config/i386/winnt.c for the rationale. */ - frame->hard_frame_pointer_offset = frame->sse_reg_save_offset; - -- /* If we can leave the frame pointer where it is, do so. Also, return -+ /* If we can leave the frame pointer where it is, do so; however return - the establisher frame for __builtin_frame_address (0) or else if the -- frame overflows the SEH maximum frame size. 
*/ -+ frame overflows the SEH maximum frame size. -+ -+ Note that the value returned by __builtin_frame_address (0) is quite -+ constrained, because setjmp is piggybacked on the SEH machinery with -+ recent versions of MinGW: -+ -+ # elif defined(__SEH__) -+ # if defined(__aarch64__) || defined(_ARM64_) -+ # define setjmp(BUF) _setjmp((BUF), __builtin_sponentry()) -+ # elif (__MINGW_GCC_VERSION < 40702) -+ # define setjmp(BUF) _setjmp((BUF), mingw_getsp()) -+ # else -+ # define setjmp(BUF) _setjmp((BUF), __builtin_frame_address (0)) -+ # endif -+ -+ and the second argument passed to _setjmp, if not null, is forwarded -+ to the TargetFrame parameter of RtlUnwindEx by longjmp (after it has -+ built an ExceptionRecord on the fly describing the setjmp buffer). */ - const HOST_WIDE_INT diff - = frame->stack_pointer_offset - frame->hard_frame_pointer_offset; -- if (diff <= 255) -+ if (diff <= 255 && !crtl->accesses_prior_frames) - { - /* The resulting diff will be a multiple of 16 lower than 255, - i.e. at most 240 as required by the unwind data structure. */ diff --git a/contrib/guix/patches/gcc-10-remap-guix-store.patch b/contrib/guix/patches/gcc-remap-guix-store.patch similarity index 100% rename from contrib/guix/patches/gcc-10-remap-guix-store.patch rename to contrib/guix/patches/gcc-remap-guix-store.patch diff --git a/contrib/guix/patches/glibc-2.27-fcommon.patch b/contrib/guix/patches/glibc-2.27-fcommon.patch index f3baacab98eed..817aa85bb95fb 100644 --- a/contrib/guix/patches/glibc-2.27-fcommon.patch +++ b/contrib/guix/patches/glibc-2.27-fcommon.patch @@ -5,7 +5,7 @@ Date: Fri May 6 11:03:04 2022 +0100 build: use -fcommon to retain legacy behaviour with GCC 10 GCC 10 started using -fno-common by default, which causes issues with - the powerpc builds using gibc 2.24. A patch was commited to glibc to fix + the powerpc builds using gibc 2.27. A patch was commited to glibc to fix the issue, 18363b4f010da9ba459b13310b113ac0647c2fcc but is non-trvial to backport, and was broken in at least one way, see the followup in commit 7650321ce037302bfc2f026aa19e0213b8d02fe6. @@ -17,6 +17,8 @@ Date: Fri May 6 11:03:04 2022 +0100 https://sourceware.org/git/?p=glibc.git;a=commit;h=18363b4f010da9ba459b13310b113ac0647c2fcc https://sourceware.org/git/?p=glibc.git;a=commit;h=7650321ce037302bfc2f026aa19e0213b8d02fe6 + This patch can be dropped when we are building with glibc 2.31+. + diff --git a/Makeconfig b/Makeconfig index 86a71e5802..aa2166be60 100644 --- a/Makeconfig diff --git a/contrib/guix/patches/glibc-2.27-guix-prefix.patch b/contrib/guix/patches/glibc-2.27-guix-prefix.patch index 6648bc6c053df..dc515907ff887 100644 --- a/contrib/guix/patches/glibc-2.27-guix-prefix.patch +++ b/contrib/guix/patches/glibc-2.27-guix-prefix.patch @@ -5,7 +5,7 @@ In order to be reproducible regardless of the architecture used to build the package, map all guix store prefixes to something fixed, e.g. /usr. We might be able to drop this in favour of using --with-nonshared-cflags -when we being using newer versions of glibc. +when we begin using newer versions of glibc. --- a/Makeconfig +++ b/Makeconfig diff --git a/contrib/guix/patches/glibc-2.27-no-librt.patch b/contrib/guix/patches/glibc-2.27-no-librt.patch new file mode 100644 index 0000000000000..4f2092ba7eca5 --- /dev/null +++ b/contrib/guix/patches/glibc-2.27-no-librt.patch @@ -0,0 +1,53 @@ +This patch can be dropped when we are building with glibc 2.30+. 
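The `-fcommon` note a few hunks above is easier to follow with a concrete reproduction. The snippet below is a minimal, self-contained illustration (not taken from this tree) of the GCC 10 behaviour change that `glibc-2.27-fcommon.patch` works around: two tentative definitions of the same variable link under the old `-fcommon` default but fail with the new `-fno-common` default.

```bash
# Illustration only; requires any GCC >= 10 on the build host.
cat > a.c <<'EOF'
int shared_var;                      /* tentative definition */
EOF
cat > b.c <<'EOF'
int shared_var;                      /* second tentative definition */
int main(void) { return shared_var; }
EOF
gcc -fcommon    a.c b.c -o with-fcommon        # links: common symbols are merged
gcc -fno-common a.c b.c -o without-fcommon     # GCC 10+ default: "multiple definition" link error
```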
+ +commit 6e41ef56c9baab719a02f1377b1e7ce7bff61e73 +Author: Florian Weimer +Date: Fri Feb 8 10:21:56 2019 +0100 + + rt: Turn forwards from librt to libc into compat symbols [BZ #24194] + + As the result of commit 6e6249d0b461b952d0f544792372663feb6d792a + ("BZ#14743: Move clock_* symbols from librt to libc."), in glibc 2.17, + clock_gettime, clock_getres, clock_settime, clock_getcpuclockid, + clock_nanosleep were added to libc, and the file rt/clock-compat.c + was added with forwarders to the actual implementations in libc. + These forwarders were wrapped in + + #if SHLIB_COMPAT (librt, GLIBC_2_2, GLIBC_2_17) + + so that they are not present for newer architectures (such as + powerpc64le) with a 2.17 or later ABI baseline. But the forwarders + were not marked as compatibility symbols. As a result, on older + architectures, historic configure checks such as + + AC_CHECK_LIB(rt, clock_gettime) + + still cause linking against librt, even though this is completely + unnecessary. It also creates a needless porting hazard because + architectures behave differently when it comes to symbol availability. + + Reviewed-by: Carlos O'Donell + +diff --git a/rt/clock-compat.c b/rt/clock-compat.c +index f816973c05..11e71aa890 100644 +--- a/rt/clock-compat.c ++++ b/rt/clock-compat.c +@@ -30,14 +30,16 @@ + #if HAVE_IFUNC + # undef INIT_ARCH + # define INIT_ARCH() +-# define COMPAT_REDIRECT(name, proto, arglist) libc_ifunc (name, &__##name) ++# define COMPAT_REDIRECT(name, proto, arglist) libc_ifunc (name, &__##name) \ ++ compat_symbol (librt, name, name, GLIBC_2_2); + #else + # define COMPAT_REDIRECT(name, proto, arglist) \ + int \ + name proto \ + { \ + return __##name arglist; \ +- } ++ } \ ++ compat_symbol (librt, name, name, GLIBC_2_2); + #endif + + COMPAT_REDIRECT (clock_getres, diff --git a/contrib/guix/patches/glibc-2.27-powerpc-ldbrx.patch b/contrib/guix/patches/glibc-2.27-powerpc-ldbrx.patch new file mode 100644 index 0000000000000..26716054c8fcf --- /dev/null +++ b/contrib/guix/patches/glibc-2.27-powerpc-ldbrx.patch @@ -0,0 +1,245 @@ +From 50b0b3c9ff71ffd7ebbd74ae46844c3566478123 Mon Sep 17 00:00:00 2001 +From: "Gabriel F. T. Gomes" +Date: Mon, 27 May 2019 15:21:22 -0300 +Subject: [PATCH] powerpc: Fix build failures with current GCC + +Since GCC commit 271500 (svn), also known as the following commit on the +git mirror: + +commit e154242724b084380e3221df7c08fcdbd8460674 +Author: amodra +Date: Wed May 22 04:34:26 2019 +0000 + + [RS6000] Don't pass -many to the assembler + +glibc builds are failing when an assembly implementation does not +declare the correct '.machine' directive, or when no such directive is +declared at all. For example, when a POWER6 instruction is used, but +'.machine power6' is not declared, the assembler will fail with an error +similar to the following: + + ../sysdeps/powerpc/powerpc64/power8/strcmp.S: Assembler messages: + 24 ../sysdeps/powerpc/powerpc64/power8/strcmp.S:55: Error: unrecognized opcode: `cmpb' + +This patch adds '.machine powerN' directives where none existed, as well +as it updates '.machine power7' directives on POWER8 files, because the +minimum binutils version required to build glibc (binutils 2.25) now +provides this machine version. It also adds '-many' to the assembler +command used to build tst-set_ppr.c. + +Tested for powerpc, powerpc64, and powerpc64le, as well as with +build-many-glibcs.py for powerpc targets. 
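A cheap sanity check of this patch's intent, run from a glibc 2.27 source tree with the patch applied (the file list is taken from the patch itself), is to confirm that each touched assembly file now declares an explicit `.machine` level matching the ISA it uses:

```bash
grep -n "^[[:space:]]*\.machine" \
  sysdeps/powerpc/powerpc64/power4/memcmp.S \
  sysdeps/powerpc/powerpc64/power7/strncmp.S \
  sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S \
  sysdeps/powerpc/powerpc64/power8/strcasecmp.S \
  sysdeps/powerpc/powerpc64/power8/strcasestr.S \
  sysdeps/powerpc/powerpc64/power8/strcmp.S
```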
+ +Reviewed-by: Tulio Magno Quites Machado Filho +--- + sysdeps/powerpc/Makefile | 5 +++ + sysdeps/powerpc/powerpc64/power4/memcmp.S | 7 ++++ + sysdeps/powerpc/powerpc64/power7/strncmp.S | 1 + + .../powerpc/powerpc64/power8/fpu/s_llround.S | 1 + + sysdeps/powerpc/powerpc64/power8/strcasecmp.S | 36 ++++++------------- + sysdeps/powerpc/powerpc64/power8/strcasestr.S | 14 ++------ + sysdeps/powerpc/powerpc64/power8/strcmp.S | 1 + + 7 files changed, 28 insertions(+), 37 deletions(-) + +diff --git a/sysdeps/powerpc/Makefile b/sysdeps/powerpc/Makefile +index 6aa683b03f..23126147df 100644 +--- a/sysdeps/powerpc/Makefile ++++ b/sysdeps/powerpc/Makefile +@@ -45,6 +45,11 @@ ifeq ($(subdir),misc) + sysdep_headers += sys/platform/ppc.h + tests += test-gettimebase + tests += tst-set_ppr ++ ++# This test is expected to run and exit with EXIT_UNSUPPORTED on ++# processors that do not implement the Power ISA 2.06 or greater. ++# But the test makes use of instructions from Power ISA 2.06 and 2.07. ++CFLAGS-tst-set_ppr.c += -Wa,-many + endif + + ifneq (,$(filter %le,$(config-machine))) +diff --git a/sysdeps/powerpc/powerpc64/power4/memcmp.S b/sysdeps/powerpc/powerpc64/power4/memcmp.S +index e5319f101f..38dcf4c9a1 100644 +--- a/sysdeps/powerpc/powerpc64/power4/memcmp.S ++++ b/sysdeps/powerpc/powerpc64/power4/memcmp.S +@@ -26,7 +26,14 @@ + # define MEMCMP memcmp + #endif + ++#ifndef __LITTLE_ENDIAN__ + .machine power4 ++#else ++/* Little endian is only available since POWER8, so it's safe to ++ specify .machine as power8 (or older), even though this is a POWER4 ++ file. Since the little-endian code uses 'ldbrx', power7 is enough. */ ++ .machine power7 ++#endif + ENTRY_TOCLESS (MEMCMP, 4) + CALL_MCOUNT 3 + +diff --git a/sysdeps/powerpc/powerpc64/power7/strncmp.S b/sysdeps/powerpc/powerpc64/power7/strncmp.S +index 0c7429d19f..10f898c5a3 100644 +--- a/sysdeps/powerpc/powerpc64/power7/strncmp.S ++++ b/sysdeps/powerpc/powerpc64/power7/strncmp.S +@@ -28,6 +28,7 @@ + const char *s2 [r4], + size_t size [r5]) */ + ++ .machine power7 + ENTRY_TOCLESS (STRNCMP, 5) + CALL_MCOUNT 3 + +diff --git a/sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S b/sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S +index a22fc63bb3..84c76ba0f9 100644 +--- a/sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S ++++ b/sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S +@@ -26,6 +26,7 @@ + + /* long long [r3] llround (float x [fp1]) */ + ++ .machine power8 + ENTRY_TOCLESS (__llround) + CALL_MCOUNT 0 + frin fp1,fp1 /* Round to nearest +-0.5. */ +diff --git a/sysdeps/powerpc/powerpc64/power8/strcasecmp.S b/sysdeps/powerpc/powerpc64/power8/strcasecmp.S +index 3a2efe2a64..eeacd40c7f 100644 +--- a/sysdeps/powerpc/powerpc64/power8/strcasecmp.S ++++ b/sysdeps/powerpc/powerpc64/power8/strcasecmp.S +@@ -91,21 +91,7 @@ + 3: \ + TOLOWER() + +-#ifdef _ARCH_PWR8 +-# define VCLZD_V8_v7 vclzd v8, v7; +-# define MFVRD_R3_V1 mfvrd r3, v1; +-# define VSUBUDM_V9_V8 vsubudm v9, v9, v8; +-# define VPOPCNTD_V8_V8 vpopcntd v8, v8; +-# define VADDUQM_V7_V8 vadduqm v9, v7, v8; +-#else +-# define VCLZD_V8_v7 .long 0x11003fc2 +-# define MFVRD_R3_V1 .long 0x7c230067 +-# define VSUBUDM_V9_V8 .long 0x112944c0 +-# define VPOPCNTD_V8_V8 .long 0x110047c3 +-# define VADDUQM_V7_V8 .long 0x11274100 +-#endif +- +- .machine power7 ++ .machine power8 + + ENTRY (__STRCASECMP) + #ifdef USE_AS_STRNCASECMP +@@ -265,15 +251,15 @@ L(different): + #ifdef __LITTLE_ENDIAN__ + /* Count trailing zero. 
*/ + vspltisb v8, -1 +- VADDUQM_V7_V8 ++ vadduqm v9, v7, v8 + vandc v8, v9, v7 +- VPOPCNTD_V8_V8 ++ vpopcntd v8, v8 + vspltb v6, v8, 15 + vcmpequb. v6, v6, v1 + blt cr6, L(shift8) + #else + /* Count leading zero. */ +- VCLZD_V8_v7 ++ vclzd v8, v7 + vspltb v6, v8, 7 + vcmpequb. v6, v6, v1 + blt cr6, L(shift8) +@@ -291,7 +277,7 @@ L(skipsum): + /* Merge and move to GPR. */ + vmrglb v6, v6, v7 + vslo v1, v6, v1 +- MFVRD_R3_V1 ++ mfvrd r3, v1 + /* Place the characters that are different in first position. */ + sldi rSTR2, rRTN, 56 + srdi rSTR2, rSTR2, 56 +@@ -301,7 +287,7 @@ L(skipsum): + vslo v6, v5, v8 + vslo v7, v4, v8 + vmrghb v1, v6, v7 +- MFVRD_R3_V1 ++ mfvrd r3, v1 + srdi rSTR2, rRTN, 48 + sldi rSTR2, rSTR2, 56 + srdi rSTR2, rSTR2, 56 +@@ -320,15 +306,15 @@ L(null_found): + #ifdef __LITTLE_ENDIAN__ + /* Count trailing zero. */ + vspltisb v8, -1 +- VADDUQM_V7_V8 ++ vadduqm v9, v7, v8 + vandc v8, v9, v7 +- VPOPCNTD_V8_V8 ++ vpopcntd v8, v8 + vspltb v6, v8, 15 + vcmpequb. v6, v6, v10 + blt cr6, L(shift_8) + #else + /* Count leading zero. */ +- VCLZD_V8_v7 ++ vclzd v8, v7 + vspltb v6, v8, 7 + vcmpequb. v6, v6, v10 + blt cr6, L(shift_8) +@@ -343,10 +329,10 @@ L(skipsum1): + vspltisb v10, 7 + vslb v10, v10, v10 + vsldoi v9, v0, v10, 1 +- VSUBUDM_V9_V8 ++ vsubudm v9, v9, v8 + vspltisb v8, 8 + vsldoi v8, v0, v8, 1 +- VSUBUDM_V9_V8 ++ vsubudm v9, v9, v8 + /* Shift and remove junk after null character. */ + #ifdef __LITTLE_ENDIAN__ + vslo v5, v5, v9 +diff --git a/sysdeps/powerpc/powerpc64/power8/strcasestr.S b/sysdeps/powerpc/powerpc64/power8/strcasestr.S +index 9fc24c29f9..e10f06fd86 100644 +--- a/sysdeps/powerpc/powerpc64/power8/strcasestr.S ++++ b/sysdeps/powerpc/powerpc64/power8/strcasestr.S +@@ -73,18 +73,8 @@ + vor reg, v8, reg; \ + vcmpequb. v6, reg, v4; + +-/* TODO: change these to the actual instructions when the minimum required +- binutils allows it. */ +-#ifdef _ARCH_PWR8 +-#define VCLZD_V8_v7 vclzd v8, v7; +-#else +-#define VCLZD_V8_v7 .long 0x11003fc2 +-#endif +- + #define FRAMESIZE (FRAME_MIN_SIZE+48) +-/* TODO: change this to .machine power8 when the minimum required binutils +- allows it. */ +- .machine power7 ++ .machine power8 + ENTRY (STRCASESTR, 4) + CALL_MCOUNT 2 + mflr r0 /* Load link register LR to r0. */ +@@ -291,7 +281,7 @@ L(nullchk1): + vcmpequb. v6, v0, v7 + /* Shift r3 by 16 bytes and proceed. */ + blt cr6, L(shift16) +- VCLZD_V8_v7 ++ vclzd v8, v7 + #ifdef __LITTLE_ENDIAN__ + vspltb v6, v8, 15 + #else +diff --git a/sysdeps/powerpc/powerpc64/power8/strcmp.S b/sysdeps/powerpc/powerpc64/power8/strcmp.S +index 15e7351d1b..d592266d1d 100644 +--- a/sysdeps/powerpc/powerpc64/power8/strcmp.S ++++ b/sysdeps/powerpc/powerpc64/power8/strcmp.S +@@ -31,6 +31,7 @@ + 64K as default, the page cross handling assumes minimum page size of + 4k. */ + ++ .machine power8 + ENTRY_TOCLESS (STRCMP, 4) + li r0,0 + +-- +2.41.0 diff --git a/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch b/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch index c0f8495c41de1..ab8ae9c023231 100644 --- a/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch +++ b/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch @@ -4,6 +4,8 @@ See also: http://lists.busybox.net/pipermail/buildroot/2020-July/590376.html. 
https://sourceware.org/git/?p=glibc.git;a=commit;h=0b9c84906f653978fb8768c7ebd0ee14a47e662e +This patch can be dropped when we are building with glibc 2.28+. + From 562c52cc81a4e456a62e6455feb32732049e9070 Mon Sep 17 00:00:00 2001 From: "H.J. Lu" Date: Mon, 31 Dec 2018 09:26:42 -0800 diff --git a/contrib/guix/patches/glibc-ldd-x86_64.patch b/contrib/guix/patches/glibc-ldd-x86_64.patch deleted file mode 100644 index a23b095caa750..0000000000000 --- a/contrib/guix/patches/glibc-ldd-x86_64.patch +++ /dev/null @@ -1,10 +0,0 @@ -By default, 'RTDLLIST' in 'ldd' refers to 'lib64/ld-linux-x86-64.so', whereas -it's in 'lib/' for us. This patch fixes that. - ---- a/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed -+++ b/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed -@@ -1,3 +1,3 @@ - /LD_TRACE_LOADED_OBJECTS=1/a\ - add_env="$add_env LD_LIBRARY_VERSION=\\$verify_out" --s_^\(RTLDLIST=\)\(.*lib\)\(\|64\|x32\)\(/[^/]*\)\(-x86-64\|-x32\)\(\.so\.[0-9.]*\)[ ]*$_\1"\2\4\6 \264\4-x86-64\6 \2x32\4-x32\6"_ -+s_^\(RTLDLIST=\)\(.*lib\)\(\|64\|x32\)\(/[^/]*\)\(-x86-64\|-x32\)\(\.so\.[0-9.]*\)[ ]*$_\1"\2\4\6 \2\4-x86-64\6 \2x32\4-x32\6"_ diff --git a/contrib/guix/patches/glibc-versioned-locpath.patch b/contrib/guix/patches/glibc-versioned-locpath.patch deleted file mode 100644 index bc7652127fa5a..0000000000000 --- a/contrib/guix/patches/glibc-versioned-locpath.patch +++ /dev/null @@ -1,240 +0,0 @@ -The format of locale data can be incompatible between libc versions, and -loading incompatible data can lead to 'setlocale' returning EINVAL at best -or triggering an assertion failure at worst. See -https://lists.gnu.org/archive/html/guix-devel/2015-09/msg00717.html -for background information. - -To address that, this patch changes libc to honor a new 'GUIX_LOCPATH' -variable, and to look for locale data in version-specific sub-directories of -that variable. So, if GUIX_LOCPATH=/foo:/bar, locale data is searched for in -/foo/X.Y and /bar/X.Y, where X.Y is the libc version number. - -That way, a single 'GUIX_LOCPATH' setting can work even if different libc -versions coexist on the system. - ---- a/locale/newlocale.c -+++ b/locale/newlocale.c -@@ -30,6 +30,7 @@ - /* Lock for protecting global data. */ - __libc_rwlock_define (extern , __libc_setlocale_lock attribute_hidden) - -+extern error_t compute_locale_search_path (char **, size_t *); - - /* Use this when we come along an error. */ - #define ERROR_RETURN \ -@@ -48,7 +49,6 @@ __newlocale (int category_mask, const char *locale, __locale_t base) - __locale_t result_ptr; - char *locale_path; - size_t locale_path_len; -- const char *locpath_var; - int cnt; - size_t names_len; - -@@ -102,17 +102,8 @@ __newlocale (int category_mask, const char *locale, __locale_t base) - locale_path = NULL; - locale_path_len = 0; - -- locpath_var = getenv ("LOCPATH"); -- if (locpath_var != NULL && locpath_var[0] != '\0') -- { -- if (__argz_create_sep (locpath_var, ':', -- &locale_path, &locale_path_len) != 0) -- return NULL; -- -- if (__argz_add_sep (&locale_path, &locale_path_len, -- _nl_default_locale_path, ':') != 0) -- return NULL; -- } -+ if (compute_locale_search_path (&locale_path, &locale_path_len) != 0) -+ return NULL; - - /* Get the names for the locales we are interested in. We either - allow a composite name or a single name. 
*/ -diff --git a/locale/setlocale.c b/locale/setlocale.c -index ead030d..0c0e314 100644 ---- a/locale/setlocale.c -+++ b/locale/setlocale.c -@@ -215,12 +215,65 @@ setdata (int category, struct __locale_data *data) - } - } - -+/* Return in *LOCALE_PATH and *LOCALE_PATH_LEN the locale data search path as -+ a colon-separated list. Return ENOMEN on error, zero otherwise. */ -+error_t -+compute_locale_search_path (char **locale_path, size_t *locale_path_len) -+{ -+ char* guix_locpath_var = getenv ("GUIX_LOCPATH"); -+ char *locpath_var = getenv ("LOCPATH"); -+ -+ if (guix_locpath_var != NULL && guix_locpath_var[0] != '\0') -+ { -+ /* Entries in 'GUIX_LOCPATH' take precedence over 'LOCPATH'. These -+ entries are systematically prefixed with "/X.Y" where "X.Y" is the -+ libc version. */ -+ if (__argz_create_sep (guix_locpath_var, ':', -+ locale_path, locale_path_len) != 0 -+ || __argz_suffix_entries (locale_path, locale_path_len, -+ "/" VERSION) != 0) -+ goto bail_out; -+ } -+ -+ if (locpath_var != NULL && locpath_var[0] != '\0') -+ { -+ char *reg_locale_path = NULL; -+ size_t reg_locale_path_len = 0; -+ -+ if (__argz_create_sep (locpath_var, ':', -+ ®_locale_path, ®_locale_path_len) != 0) -+ goto bail_out; -+ -+ if (__argz_append (locale_path, locale_path_len, -+ reg_locale_path, reg_locale_path_len) != 0) -+ goto bail_out; -+ -+ free (reg_locale_path); -+ } -+ -+ if (*locale_path != NULL) -+ { -+ /* Append the system default locale directory. */ -+ if (__argz_add_sep (locale_path, locale_path_len, -+ _nl_default_locale_path, ':') != 0) -+ goto bail_out; -+ } -+ -+ return 0; -+ -+ bail_out: -+ free (*locale_path); -+ *locale_path = NULL; -+ *locale_path_len = 0; -+ -+ return ENOMEM; -+} -+ - char * - setlocale (int category, const char *locale) - { - char *locale_path; - size_t locale_path_len; -- const char *locpath_var; - char *composite; - - /* Sanity check for CATEGORY argument. */ -@@ -251,17 +304,10 @@ setlocale (int category, const char *locale) - locale_path = NULL; - locale_path_len = 0; - -- locpath_var = getenv ("LOCPATH"); -- if (locpath_var != NULL && locpath_var[0] != '\0') -+ if (compute_locale_search_path (&locale_path, &locale_path_len) != 0) - { -- if (__argz_create_sep (locpath_var, ':', -- &locale_path, &locale_path_len) != 0 -- || __argz_add_sep (&locale_path, &locale_path_len, -- _nl_default_locale_path, ':') != 0) -- { -- __libc_rwlock_unlock (__libc_setlocale_lock); -- return NULL; -- } -+ __libc_rwlock_unlock (__libc_setlocale_lock); -+ return NULL; - } - - if (category == LC_ALL) -diff --git a/string/Makefile b/string/Makefile -index 8424a61..f925503 100644 ---- a/string/Makefile -+++ b/string/Makefile -@@ -38,7 +38,7 @@ routines := strcat strchr strcmp strcoll strcpy strcspn \ - swab strfry memfrob memmem rawmemchr strchrnul \ - $(addprefix argz-,append count create ctsep next \ - delete extract insert stringify \ -- addsep replace) \ -+ addsep replace suffix) \ - envz basename \ - strcoll_l strxfrm_l string-inlines memrchr \ - xpg-strerror strerror_l -diff --git a/string/argz-suffix.c b/string/argz-suffix.c -new file mode 100644 -index 0000000..505b0f2 ---- /dev/null -+++ b/string/argz-suffix.c -@@ -0,0 +1,56 @@ -+/* Copyright (C) 2015 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Ludovic Courtès . 
-+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include -+#include -+#include -+#include -+ -+ -+error_t -+__argz_suffix_entries (char **argz, size_t *argz_len, const char *suffix) -+ -+{ -+ size_t suffix_len = strlen (suffix); -+ size_t count = __argz_count (*argz, *argz_len); -+ size_t new_argz_len = *argz_len + count * suffix_len; -+ char *new_argz = malloc (new_argz_len); -+ -+ if (new_argz) -+ { -+ char *p = new_argz, *entry; -+ -+ for (entry = *argz; -+ entry != NULL; -+ entry = argz_next (*argz, *argz_len, entry)) -+ { -+ p = stpcpy (p, entry); -+ p = stpcpy (p, suffix); -+ p++; -+ } -+ -+ free (*argz); -+ *argz = new_argz; -+ *argz_len = new_argz_len; -+ -+ return 0; -+ } -+ else -+ return ENOMEM; -+} -+weak_alias (__argz_suffix_entries, argz_suffix_entries) -diff --git a/string/argz.h b/string/argz.h -index bb62a31..d276a35 100644 ---- a/string/argz.h -+++ b/string/argz.h -@@ -134,6 +134,16 @@ extern error_t argz_replace (char **__restrict __argz, - const char *__restrict __str, - const char *__restrict __with, - unsigned int *__restrict __replace_count); -+ -+/* Suffix each entry of ARGZ & ARGZ_LEN with SUFFIX. Return 0 on success, -+ and ENOMEN if memory cannot be allocated. */ -+extern error_t __argz_suffix_entries (char **__restrict __argz, -+ size_t *__restrict __argz_len, -+ const char *__restrict __suffix); -+extern error_t argz_suffix_entries (char **__restrict __argz, -+ size_t *__restrict __argz_len, -+ const char *__restrict __suffix); -+ - - /* Returns the next entry in ARGZ & ARGZ_LEN after ENTRY, or NULL if there - are no more. If entry is NULL, then the first entry is returned. This diff --git a/contrib/guix/patches/lief-fix-ppc64-nx-default.patch b/contrib/guix/patches/lief-fix-ppc64-nx-default.patch deleted file mode 100644 index 101bc1ddc0cfe..0000000000000 --- a/contrib/guix/patches/lief-fix-ppc64-nx-default.patch +++ /dev/null @@ -1,29 +0,0 @@ -Correct default for Binary::has_nx on ppc64 - -From the Linux kernel source: - - * This is the default if a program doesn't have a PT_GNU_STACK - * program header entry. The PPC64 ELF ABI has a non executable stack - * stack by default, so in the absence of a PT_GNU_STACK program header - * we turn execute permission off. - -This patch can be dropped the next time we update LIEF. - -diff --git a/src/ELF/Binary.cpp b/src/ELF/Binary.cpp -index a90be1ab..fd2d9764 100644 ---- a/src/ELF/Binary.cpp -+++ b/src/ELF/Binary.cpp -@@ -1084,7 +1084,12 @@ bool Binary::has_nx() const { - return segment->type() == SEGMENT_TYPES::PT_GNU_STACK; - }); - if (it_stack == std::end(segments_)) { -- return false; -+ if (header().machine_type() == ARCH::EM_PPC64) { -+ // The PPC64 ELF ABI has a non-executable stack by default. 
-+ return true; -+ } else { -+ return false; -+ } - } - - return !(*it_stack)->has(ELF_SEGMENT_FLAGS::PF_X); diff --git a/contrib/guix/patches/nsis-disable-installer-reloc.patch b/contrib/guix/patches/nsis-disable-installer-reloc.patch deleted file mode 100644 index 4914527e56beb..0000000000000 --- a/contrib/guix/patches/nsis-disable-installer-reloc.patch +++ /dev/null @@ -1,30 +0,0 @@ -Patch NSIS so that it's installer stubs, produced at NSIS build time, -do not contain .reloc sections, which will exist by default when using -binutils/ld 2.36+. - -This ultimately fixes an issue when running the installer with the -"Force randomization for images (Mandatory ASLR)" setting active. - -This patch has not yet been sent upstream, because it's not clear if this -is the best fix, for the underlying issue, which seems to be that makensis -doesn't account for .reloc sections when it builds installers. - -The existence of a reloc section shouldn't be a problem, and, if anything, -is actually a requirement for working ASLR. All other Windows binaries we -produce contain them, and function correctly when under the same -"Force randomization for images (Mandatory ASLR)" setting. - -See: -https://github.com/bitcoin/bitcoin/issues/25726 -https://sourceforge.net/p/nsis/bugs/1131/ - ---- a/SCons/Config/gnu -+++ b/SCons/Config/gnu -@@ -102,6 +102,7 @@ stub_env.Append(LINKFLAGS = ['-mwindows']) # build windows executables - stub_env.Append(LINKFLAGS = ['$NODEFLIBS_FLAG']) # no standard libraries - stub_env.Append(LINKFLAGS = ['$ALIGN_FLAG']) # 512 bytes align - stub_env.Append(LINKFLAGS = ['$MAP_FLAG']) # generate map file -+stub_env.Append(LINKFLAGS = ['-Wl,--disable-reloc-section']) - - conf = FlagsConfigure(stub_env) - conf.CheckCompileFlag('-fno-tree-loop-distribute-patterns') # GCC 10: Don't generate msvcrt!memmove calls (bug #1248) diff --git a/contrib/guix/patches/nsis-gcc-10-memmove.patch b/contrib/guix/patches/nsis-gcc-10-memmove.patch deleted file mode 100644 index a1aadfd4f3618..0000000000000 --- a/contrib/guix/patches/nsis-gcc-10-memmove.patch +++ /dev/null @@ -1,23 +0,0 @@ -commit f6df41524e703dc471e283e566a48e05a735b7f2 -Author: Anders -Date: Sat Jun 27 23:18:45 2020 +0000 - - Don't let GCC 10 generate memmove calls (bug #1248) - - git-svn-id: https://svn.code.sf.net/p/nsis/code/NSIS/trunk@7189 212acab6-be3b-0410-9dea-997c60f758d6 - -diff --git a/SCons/Config/gnu b/SCons/Config/gnu -index bfcb362d..21fa446b 100644 ---- a/SCons/Config/gnu -+++ b/SCons/Config/gnu -@@ -103,6 +103,10 @@ stub_env.Append(LINKFLAGS = ['$NODEFLIBS_FLAG']) # no standard libraries - stub_env.Append(LINKFLAGS = ['$ALIGN_FLAG']) # 512 bytes align - stub_env.Append(LINKFLAGS = ['$MAP_FLAG']) # generate map file - -+conf = FlagsConfigure(stub_env) -+conf.CheckCompileFlag('-fno-tree-loop-distribute-patterns') # GCC 10: Don't generate msvcrt!memmove calls (bug #1248) -+conf.Finish() -+ - stub_uenv = stub_env.Clone() - stub_uenv.Append(CPPDEFINES = ['_UNICODE', 'UNICODE']) - diff --git a/contrib/guix/patches/vmov-alignment.patch b/contrib/guix/patches/vmov-alignment.patch index 072f76eafd38f..7976b864af0f2 100644 --- a/contrib/guix/patches/vmov-alignment.patch +++ b/contrib/guix/patches/vmov-alignment.patch @@ -1,6 +1,7 @@ Description: Use unaligned VMOV instructions Author: Stephen Kitt Bug-Debian: https://bugs.debian.org/939559 +See also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412 Based on a patch originally by Claude Heiland-Allen diff --git a/contrib/init/bitcoind.service b/contrib/init/bitcoind.service index 
93de353bb4eb0..87da17f95527f 100644 --- a/contrib/init/bitcoind.service +++ b/contrib/init/bitcoind.service @@ -18,10 +18,11 @@ After=network-online.target Wants=network-online.target [Service] -ExecStart=/usr/bin/bitcoind -daemonwait \ - -pid=/run/bitcoind/bitcoind.pid \ +ExecStart=/usr/bin/bitcoind -pid=/run/bitcoind/bitcoind.pid \ -conf=/etc/bitcoin/bitcoin.conf \ - -datadir=/var/lib/bitcoind + -datadir=/var/lib/bitcoind \ + -startupnotify='systemd-notify --ready' \ + -shutdownnotify='systemd-notify --stopping' # Make sure the config directory is readable by the service user PermissionsStartOnly=true @@ -30,8 +31,10 @@ ExecStartPre=/bin/chgrp bitcoin /etc/bitcoin # Process management #################### -Type=forking +Type=notify +NotifyAccess=all PIDFile=/run/bitcoind/bitcoind.pid + Restart=on-failure TimeoutStartSec=infinity TimeoutStopSec=600 diff --git a/contrib/macdeploy/README.md b/contrib/macdeploy/README.md index 599a0bfa6ce49..ea599df3d844b 100644 --- a/contrib/macdeploy/README.md +++ b/contrib/macdeploy/README.md @@ -6,7 +6,7 @@ The `macdeployqtplus` script should not be run manually. Instead, after building make deploy ``` -When complete, it will have produced `Bitcoin-Core.dmg`. +When complete, it will have produced `Bitcoin-Core.zip`. ## SDK Extraction @@ -14,56 +14,50 @@ When complete, it will have produced `Bitcoin-Core.dmg`. A free Apple Developer Account is required to proceed. -Our current macOS SDK -(`Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz`) -can be extracted from -[Xcode_12.2.xip](https://download.developer.apple.com/Developer_Tools/Xcode_12.2/Xcode_12.2.xip). +Our macOS SDK can be extracted from +[Xcode_15.xip](https://download.developer.apple.com/Developer_Tools/Xcode_15/Xcode_15.xip). Alternatively, after logging in to your account go to 'Downloads', then 'More' -and search for [`Xcode 12.2`](https://developer.apple.com/download/all/?q=Xcode%2012.2). +and search for [`Xcode 15`](https://developer.apple.com/download/all/?q=Xcode%2015). An Apple ID and cookies enabled for the hostname are needed to download this. -The `sha256sum` of the downloaded XIP archive should be `28d352f8c14a43d9b8a082ac6338dc173cb153f964c6e8fb6ba389e5be528bd0`. +The `sha256sum` of the downloaded XIP archive should be `4daaed2ef2253c9661779fa40bfff50655dc7ec45801aba5a39653e7bcdde48e`. -After Xcode version 7.x, Apple started shipping the `Xcode.app` in a `.xip` -archive. This makes the SDK less-trivial to extract on non-macOS machines. One -approach (tested on Debian Buster) is outlined below: +To extract the `.xip` on Linux: ```bash # Install/clone tools needed for extracting Xcode.app apt install cpio git clone https://github.com/bitcoin-core/apple-sdk-tools.git -# Unpack Xcode_12.2.xip and place the resulting Xcode.app in your current +# Unpack the .xip and place the resulting Xcode.app in your current # working directory -python3 apple-sdk-tools/extract_xcode.py -f Xcode_12.2.xip | cpio -d -i +python3 apple-sdk-tools/extract_xcode.py -f Xcode_15.xip | cpio -d -i ``` -On macOS the process is more straightforward: +On macOS: ```bash -xip -x Xcode_12.2.xip +xip -x Xcode_15.xip ``` -### Step 2: Generating `Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz` from `Xcode.app` +### Step 2: Generating the SDK tarball from `Xcode.app` -To generate `Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz`, run -the script [`gen-sdk`](./gen-sdk) with the path to `Xcode.app` (extracted in the -previous stage) as the first argument. 
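Before extracting the archive, it is worth verifying the download against the checksum documented above (assuming it was saved as `Xcode_15.xip` in the current directory; on macOS, `shasum -a 256 -c` can be used in place of `sha256sum -c`):

```bash
echo "4daaed2ef2253c9661779fa40bfff50655dc7ec45801aba5a39653e7bcdde48e  Xcode_15.xip" | sha256sum -c -
```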
+To generate the SDK, run the script [`gen-sdk`](./gen-sdk) with the +path to `Xcode.app` (extracted in the previous stage) as the first argument. ```bash -# Generate a Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz from -# the supplied Xcode.app ./contrib/macdeploy/gen-sdk '/path/to/Xcode.app' ``` -The `sha256sum` of the generated TAR.GZ archive should be `df75d30ecafc429e905134333aeae56ac65fac67cb4182622398fd717df77619`. +The generated archive should be: `Xcode-15.0-15A240d-extracted-SDK-with-libcxx-headers.tar.gz`. +The `sha256sum` should be `c0c2e7bb92c1fee0c4e9f3a485e4530786732d6c6dd9e9f418c282aa6892f55d`. -## Deterministic macOS DMG Notes +## Deterministic macOS App Notes -Working macOS DMGs are created in Linux by combining a recent `clang`, the Apple -`binutils` (`ld`, `ar`, etc) and DMG authoring tools. +macOS Applications are created in Linux by combining a recent `clang` and the Apple +`binutils` (`ld`, `ar`, etc). Apple uses `clang` extensively for development and has upstreamed the necessary functionality so that a vanilla clang can take advantage. It supports the use of `-F`, @@ -93,20 +87,15 @@ created using these tools. The build process has been designed to avoid includin SDK's files in Guix's outputs. All interim tarballs are fully deterministic and may be freely redistributed. -[`xorrisofs`](https://www.gnu.org/software/xorriso/) is used to create the DMG. - -A background image is added to DMG files by inserting a `.DS_Store` during creation. - As of OS X 10.9 Mavericks, using an Apple-blessed key to sign binaries is a requirement in order to satisfy the new Gatekeeper requirements. Because this private key cannot be shared, we'll have to be a bit creative in order for the build process to remain somewhat deterministic. Here's how it works: -- Builders use Guix to create an unsigned release. This outputs an unsigned DMG which +- Builders use Guix to create an unsigned release. This outputs an unsigned ZIP which users may choose to bless and run. It also outputs an unsigned app structure in the form - of a tarball, which also contains all of the tools that have been previously (deterministically) - built in order to create a final DMG. + of a tarball. - The Apple keyholder uses this unsigned app to create a detached signature, using the script that is also included there. Detached signatures are available from this [repository](https://github.com/bitcoin-core/bitcoin-detached-sigs). - Builders feed the unsigned app + detached signature back into Guix. It uses the - pre-built tools to recombine the pieces into a deterministic DMG. + pre-built tools to recombine the pieces into a deterministic ZIP. diff --git a/contrib/macdeploy/background.tiff b/contrib/macdeploy/background.tiff deleted file mode 100644 index 1fb088c8374ac..0000000000000 Binary files a/contrib/macdeploy/background.tiff and /dev/null differ diff --git a/contrib/macdeploy/detached-sig-create.sh b/contrib/macdeploy/detached-sig-create.sh index 5c29e9f499f4d..214088eaa4008 100755 --- a/contrib/macdeploy/detached-sig-create.sh +++ b/contrib/macdeploy/detached-sig-create.sh @@ -24,7 +24,7 @@ fi rm -rf ${TEMPDIR} mkdir -p ${TEMPDIR} -${SIGNAPPLE} sign -f --detach "${TEMPDIR}/${OUTROOT}" "$@" "${BUNDLE}" +${SIGNAPPLE} sign -f --detach "${TEMPDIR}/${OUTROOT}" "$@" "${BUNDLE}" --hardened-runtime tar -C "${TEMPDIR}" -czf "${OUT}" . 
rm -rf "${TEMPDIR}" diff --git a/contrib/macdeploy/gen-sdk b/contrib/macdeploy/gen-sdk index 6efaaccb8e16b..b73f5cba14c48 100755 --- a/contrib/macdeploy/gen-sdk +++ b/contrib/macdeploy/gen-sdk @@ -62,9 +62,6 @@ def run(): out_name = "Xcode-{xcode_version}-{xcode_build_id}-extracted-SDK-with-libcxx-headers".format(xcode_version=xcode_version, xcode_build_id=xcode_build_id) - xcode_libcxx_dir = xcode_app.joinpath("Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1") - assert xcode_libcxx_dir.is_dir() - if args.out_sdktgz: out_sdktgz_path = pathlib.Path(args.out_sdktgz_path) else: @@ -72,7 +69,7 @@ def run(): out_sdktgz_path = pathlib.Path("./{}.tar.gz".format(out_name)) def tarfp_add_with_base_change(tarfp, dir_to_add, alt_base_dir): - """Add all files in dir_to_add to tarfp, but prepent MEMBERPREFIX to the files' + """Add all files in dir_to_add to tarfp, but prepent alt_base_dir to the files' names e.g. if the only file under /root/bazdir is /root/bazdir/qux, invoking: @@ -107,8 +104,6 @@ def run(): with tarfile.open(mode="w", fileobj=gzf, format=tarfile.GNU_FORMAT) as tarfp: print("Adding MacOSX SDK {} files...".format(sdk_version)) tarfp_add_with_base_change(tarfp, sdk_dir, out_name) - print("Adding libc++ headers...") - tarfp_add_with_base_change(tarfp, xcode_libcxx_dir, "{}/usr/include/c++/v1".format(out_name)) print("Done! Find the resulting gzipped tarball at:") print(out_sdktgz_path.resolve()) diff --git a/contrib/macdeploy/macdeployqtplus b/contrib/macdeploy/macdeployqtplus index 7ec254482019c..b4d5df2dcf333 100755 --- a/contrib/macdeploy/macdeployqtplus +++ b/contrib/macdeploy/macdeployqtplus @@ -18,11 +18,9 @@ import sys, re, os, platform, shutil, stat, subprocess, os.path from argparse import ArgumentParser -from ds_store import DSStore -from mac_alias import Alias from pathlib import Path from subprocess import PIPE, run -from typing import List, Optional +from typing import Optional # This is ported from the original macdeployqt with modifications @@ -183,7 +181,7 @@ class DeploymentInfo(object): return True return False -def getFrameworks(binaryPath: str, verbose: int) -> List[FrameworkInfo]: +def getFrameworks(binaryPath: str, verbose: int) -> list[FrameworkInfo]: if verbose: print(f"Inspecting with otool: {binaryPath}") otoolbin=os.getenv("OTOOL", "otool") @@ -287,7 +285,7 @@ def copyFramework(framework: FrameworkInfo, path: str, verbose: int) -> Optional return toPath -def deployFrameworks(frameworks: List[FrameworkInfo], bundlePath: str, binaryPath: str, strip: bool, verbose: int, deploymentInfo: Optional[DeploymentInfo] = None) -> DeploymentInfo: +def deployFrameworks(frameworks: list[FrameworkInfo], bundlePath: str, binaryPath: str, strip: bool, verbose: int, deploymentInfo: Optional[DeploymentInfo] = None) -> DeploymentInfo: if deploymentInfo is None: deploymentInfo = DeploymentInfo() @@ -385,7 +383,7 @@ def deployPlugins(appBundleInfo: ApplicationBundleInfo, deploymentInfo: Deployme ap = ArgumentParser(description="""Improved version of macdeployqt. -Outputs a ready-to-deploy app in a folder "dist" and optionally wraps it in a .dmg file. +Outputs a ready-to-deploy app in a folder "dist" and optionally wraps it in a .zip file. Note, that the "dist" folder will be deleted before deploying on each run. 
Optionally, Qt translation files (.qm) can be added to the bundle.""") @@ -395,8 +393,8 @@ ap.add_argument("appname", nargs=1, metavar="appname", help="name of the app bei ap.add_argument("-verbose", nargs="?", const=True, help="Output additional debugging information") ap.add_argument("-no-plugins", dest="plugins", action="store_false", default=True, help="skip plugin deployment") ap.add_argument("-no-strip", dest="strip", action="store_false", default=True, help="don't run 'strip' on the binaries") -ap.add_argument("-dmg", nargs="?", const="", metavar="basename", help="create a .dmg disk image") ap.add_argument("-translations-dir", nargs=1, metavar="path", default=None, help="Path to Qt's translations. Base translations will automatically be added to the bundle's resources.") +ap.add_argument("-zip", nargs="?", const="", metavar="zip", help="create a .zip containing the app bundle") config = ap.parse_args() @@ -417,12 +415,9 @@ if os.path.exists("dist"): print("+ Removing existing dist folder +") shutil.rmtree("dist") -if os.path.exists(appname + ".dmg"): - print("+ Removing existing DMG +") - os.unlink(appname + ".dmg") - -if os.path.exists(appname + ".temp.dmg"): - os.unlink(appname + ".temp.dmg") +if os.path.exists(appname + ".zip"): + print("+ Removing existing .zip +") + os.unlink(appname + ".zip") # ------------------------------------------------ @@ -497,99 +492,13 @@ with open(os.path.join(applicationBundle.resourcesPath, "qt.conf"), "wb") as f: # ------------------------------------------------ -print("+ Generating .DS_Store +") - -output_file = os.path.join("dist", ".DS_Store") - -ds = DSStore.open(output_file, 'w+') - -ds['.']['bwsp'] = { - 'WindowBounds': '{{300, 280}, {500, 343}}', - 'PreviewPaneVisibility': False, -} - -icvp = { - 'gridOffsetX': 0.0, - 'textSize': 12.0, - 'viewOptionsVersion': 1, - 'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t 
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00', - 'backgroundColorBlue': 1.0, - 'iconSize': 96.0, - 'backgroundColorGreen': 1.0, - 'arrangeBy': 'none', - 'showIconPreview': True, - 'gridSpacing': 100.0, - 'gridOffsetY': 0.0, - 'showItemInfo': False, - 'labelOnBottom': True, - 'backgroundType': 2, - 'backgroundColorRed': 1.0 -} -alias = Alias().from_bytes(icvp['backgroundImageAlias']) -alias.volume.name = appname -alias.volume.posix_path = '/Volumes/' + appname -icvp['backgroundImageAlias'] = alias.to_bytes() -ds['.']['icvp'] = icvp - -ds['.']['vSrn'] = ('long', 1) - -ds['Applications']['Iloc'] = (370, 156) -ds['Navcoin-Qt.app']['Iloc'] = (128, 156) - -ds.flush() -ds.close() - -# ------------------------------------------------ - if platform.system() == "Darwin": subprocess.check_call(f"codesign --deep --force --sign - {target}", shell=True) -print("+ Installing background.tiff +") - -bg_path = os.path.join('dist', '.background', 'background.tiff') -os.mkdir(os.path.dirname(bg_path)) - -tiff_path = os.path.join('contrib', 'macdeploy', 'background.tiff') -shutil.copy2(tiff_path, bg_path) - -# ------------------------------------------------ - -print("+ Generating symlink for /Applications +") - -os.symlink("/Applications", os.path.join('dist', "Applications")) - # ------------------------------------------------ -if config.dmg is not None: - - print("+ Preparing .dmg disk image +") - - if verbose: - print("Determining size of \"dist\"...") - size = 0 - for path, dirs, files in os.walk("dist"): - for file in files: - size += os.path.getsize(os.path.join(path, file)) - size += int(size * 0.15) - - if verbose: - print("Creating temp image for modification...") - - tempname: str = appname + ".temp.dmg" - - run(["hdiutil", "create", tempname, "-srcfolder", "dist", "-format", "UDRW", "-size", str(size), "-volname", appname], check=True, text=True) - - if verbose: - print("Attaching temp image...") - output = run(["hdiutil", "attach", tempname, "-readwrite"], check=True, text=True, stdout=PIPE).stdout - - print("+ Finalizing .dmg disk image +") - - run(["hdiutil", "detach", f"/Volumes/{appname}"], text=True) - - run(["hdiutil", "convert", tempname, "-format", "UDZO", "-o", appname, "-imagekey", "zlib-level=9"], check=True, text=True) - - os.unlink(tempname) +if config.zip is not None: + shutil.make_archive('{}'.format(appname), format='zip', root_dir='dist', base_dir='Bitcoin-Qt.app') # ------------------------------------------------ diff --git a/contrib/message-capture/message-capture-parser.py b/contrib/message-capture/message-capture-parser.py index d6ddc1c149a27..0f409717d4c8a 100755 --- a/contrib/message-capture/message-capture-parser.py +++ b/contrib/message-capture/message-capture-parser.py @@ -11,7 +11,7 @@ from io import BytesIO import json from pathlib import Path -from typing import Any, List, Optional +from typing import Any, Optional sys.path.append(os.path.join(os.path.dirname(__file__), '../../test/functional')) @@ -92,7 +92,7 @@ def to_jsonable(obj: Any) -> Any: return obj -def process_file(path: str, messages: List[Any], recv: bool, progress_bar: Optional[ProgressBar]) -> None: +def 
process_file(path: str, messages: list[Any], recv: bool, progress_bar: Optional[ProgressBar]) -> None: with open(path, 'rb') as f_in: if progress_bar: bytes_read = 0 @@ -188,7 +188,7 @@ def main(): output = Path.cwd() / Path(args.output) if args.output else False use_progress_bar = (not args.no_progress_bar) and sys.stdout.isatty() - messages = [] # type: List[Any] + messages = [] # type: list[Any] if use_progress_bar: total_size = sum(capture.stat().st_size for capture in capturepaths) progress_bar = ProgressBar(total_size) diff --git a/contrib/seeds/asmap.py b/contrib/seeds/asmap.py index e28e5cf532e90..214805b5a5790 100644 --- a/contrib/seeds/asmap.py +++ b/contrib/seeds/asmap.py @@ -10,11 +10,12 @@ import ipaddress import random import unittest +from collections.abc import Callable, Iterable from enum import Enum from functools import total_ordering -from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union, overload +from typing import Optional, Union, overload -def net_to_prefix(net: Union[ipaddress.IPv4Network,ipaddress.IPv6Network]) -> List[bool]: +def net_to_prefix(net: Union[ipaddress.IPv4Network,ipaddress.IPv6Network]) -> list[bool]: """ Convert an IPv4 or IPv6 network to a prefix represented as a list of bits. @@ -32,7 +33,7 @@ def net_to_prefix(net: Union[ipaddress.IPv4Network,ipaddress.IPv6Network]) -> Li assert (netrange & ((1 << (128 - num_bits)) - 1)) == 0 return [((netrange >> (127 - i)) & 1) != 0 for i in range(num_bits)] -def prefix_to_net(prefix: List[bool]) -> Union[ipaddress.IPv4Network,ipaddress.IPv6Network]: +def prefix_to_net(prefix: list[bool]) -> Union[ipaddress.IPv4Network,ipaddress.IPv6Network]: """The reverse operation of net_to_prefix.""" # Convert to number netrange = sum(b << (127 - i) for i, b in enumerate(prefix)) @@ -47,10 +48,10 @@ def prefix_to_net(prefix: List[bool]) -> Union[ipaddress.IPv4Network,ipaddress.I return ipaddress.IPv6Network((netrange, num_bits), True) # Shortcut for (prefix, ASN) entries. -ASNEntry = Tuple[List[bool], int] +ASNEntry = tuple[list[bool], int] # Shortcut for (prefix, old ASN, new ASN) entries. -ASNDiff = Tuple[List[bool], int, int] +ASNDiff = tuple[list[bool], int, int] class _VarLenCoder: """ @@ -75,7 +76,7 @@ class _VarLenCoder: other classes start one past the last element of the class before it. """ - def __init__(self, minval: int, clsbits: List[int]): + def __init__(self, minval: int, clsbits: list[int]): """Construct a new _VarLenCoder.""" self._minval = minval self._clsbits = clsbits @@ -85,7 +86,7 @@ def can_encode(self, val: int) -> bool: """Check whether value val is in the range this coder supports.""" return self._minval <= val <= self._maxval - def encode(self, val: int, ret: List[int]) -> None: + def encode(self, val: int, ret: list[int]) -> None: """Append encoding of val onto integer list ret.""" assert self._minval <= val <= self._maxval @@ -120,7 +121,7 @@ def encode_size(self, val: int) -> int: break return ret + bits - def decode(self, stream, bitpos) -> Tuple[int,int]: + def decode(self, stream, bitpos) -> tuple[int,int]: """Decode a number starting at bitpos in stream, returning value and new bitpos.""" val = self._minval bits = 0 @@ -281,11 +282,11 @@ class ASMap: - mappings, represented by new trie nodes. 
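To make the `net_to_prefix`/`prefix_to_net` hunks above concrete, a small usage sketch follows; it assumes `contrib/seeds/asmap.py` is importable and behaves as its docstrings describe, with IPv4 networks embedded in the IPv4-mapped IPv6 range, hence the 96-bit offset:

```python
# Usage sketch for the prefix helpers in asmap.py (assumption: IPv4 is remapped
# into the ::ffff:0:0/96 range, so a /24 becomes a 120-bit prefix).
import ipaddress
from asmap import net_to_prefix, prefix_to_net

bits = net_to_prefix(ipaddress.ip_network("192.0.2.0/24"))
assert len(bits) == 96 + 24
assert prefix_to_net(bits) == ipaddress.ip_network("192.0.2.0/24")
```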
""" - def update(self, prefix: List[bool], asn: int) -> None: + def update(self, prefix: list[bool], asn: int) -> None: """Update this ASMap object to map prefix to the specified asn.""" assert asn == 0 or _CODER_ASN.can_encode(asn) - def recurse(node: List, offset: int) -> None: + def recurse(node: list, offset: int) -> None: if offset == len(prefix): # Reached the end of prefix; overwrite this node. node.clear() @@ -306,7 +307,7 @@ def recurse(node: List, offset: int) -> None: node.append(oldasn) recurse(self._trie, 0) - def update_multi(self, entries: List[Tuple[List[bool], int]]) -> None: + def update_multi(self, entries: list[tuple[list[bool], int]]) -> None: """Apply multiple update operations, where longer prefixes take precedence.""" entries.sort(key=lambda entry: len(entry[0])) for prefix, asn in entries: @@ -314,7 +315,7 @@ def update_multi(self, entries: List[Tuple[List[bool], int]]) -> None: def _set_trie(self, trie) -> None: """Set trie directly. Internal use only.""" - def recurse(node: List) -> None: + def recurse(node: list) -> None: if len(node) < 2: return recurse(node[0]) @@ -342,7 +343,7 @@ def entry_key(entry): for prefix, asn in sorted(entries, key=entry_key): self.update(prefix, asn) - def lookup(self, prefix: List[bool]) -> Optional[int]: + def lookup(self, prefix: list[bool]) -> Optional[int]: """Look up a prefix. Returns ASN, or 0 if unassigned, or None if indeterminate.""" node = self._trie for bit in prefix: @@ -353,11 +354,11 @@ def lookup(self, prefix: List[bool]) -> Optional[int]: return node[0] return None - def _to_entries_flat(self, fill: bool = False) -> List[ASNEntry]: + def _to_entries_flat(self, fill: bool = False) -> list[ASNEntry]: """Convert an ASMap object to a list of non-overlapping (prefix, asn) objects.""" - prefix : List[bool] = [] + prefix : list[bool] = [] - def recurse(node: List) -> List[ASNEntry]: + def recurse(node: list) -> list[ASNEntry]: ret = [] if len(node) == 1: if node[0] > 0: @@ -375,24 +376,24 @@ def recurse(node: List) -> List[ASNEntry]: return ret return recurse(self._trie) - def _to_entries_minimal(self, fill: bool = False) -> List[ASNEntry]: + def _to_entries_minimal(self, fill: bool = False) -> list[ASNEntry]: """Convert a trie to a minimal list of ASNEntry objects, exploiting overlap.""" - prefix : List[bool] = [] + prefix : list[bool] = [] - def recurse(node: List) -> (Tuple[Dict[Optional[int], List[ASNEntry]], bool]): + def recurse(node: list) -> (tuple[dict[Optional[int], list[ASNEntry]], bool]): if len(node) == 1 and node[0] == 0: return {None if fill else 0: []}, True if len(node) == 1: return {node[0]: [], None: [(list(prefix), node[0])]}, False - ret: Dict[Optional[int], List[ASNEntry]] = {} + ret: dict[Optional[int], list[ASNEntry]] = {} prefix.append(False) left, lhole = recurse(node[0]) prefix[-1] = True right, rhole = recurse(node[1]) prefix.pop() hole = not fill and (lhole or rhole) - def candidate(ctx: Optional[int], res0: Optional[List[ASNEntry]], - res1: Optional[List[ASNEntry]]): + def candidate(ctx: Optional[int], res0: Optional[list[ASNEntry]], + res1: Optional[list[ASNEntry]]): if res0 is not None and res1 is not None: if ctx not in ret or len(res0) + len(res1) < len(ret[ctx]): ret[ctx] = res0 + res1 @@ -417,7 +418,7 @@ def __str__(self) -> str: """Convert this ASMap object to a string containing Python code constructing it.""" return f"ASMap({self._trie})" - def to_entries(self, overlapping: bool = True, fill: bool = False) -> List[ASNEntry]: + def to_entries(self, overlapping: bool = True, fill: bool = 
False) -> list[ASNEntry]: """ Convert the mappings in this ASMap object to a list of ASNEntry objects. @@ -448,7 +449,7 @@ def from_random(num_leaves: int = 10, max_asn: int = 6, assert max_asn >= 1 or unassigned_prob == 1 assert _CODER_ASN.can_encode(max_asn) assert 0.0 <= unassigned_prob <= 1.0 - trie: List = [] + trie: list = [] leaves = [trie] ret = ASMap() for i in range(1, num_leaves): @@ -472,12 +473,12 @@ def from_random(num_leaves: int = 10, max_asn: int = 6, def _to_binnode(self, fill: bool = False) -> _BinNode: """Convert a trie to a _BinNode object.""" - def recurse(node: List) -> Tuple[Dict[Optional[int], _BinNode], bool]: + def recurse(node: list) -> tuple[dict[Optional[int], _BinNode], bool]: if len(node) == 1 and node[0] == 0: return {(None if fill else 0): _BinNode.make_end()}, True if len(node) == 1: return {None: _BinNode.make_leaf(node[0]), node[0]: _BinNode.make_end()}, False - ret: Dict[Optional[int], _BinNode] = {} + ret: dict[Optional[int], _BinNode] = {} left, lhole = recurse(node[0]) right, rhole = recurse(node[1]) hole = (lhole or rhole) and not fill @@ -507,7 +508,7 @@ def candidate(ctx: Optional[int], arg1, arg2, func: Callable): @staticmethod def _from_binnode(binnode: _BinNode) -> "ASMap": """Construct an ASMap object from a _BinNode. Internal use only.""" - def recurse(node: _BinNode, default: int) -> List: + def recurse(node: _BinNode, default: int) -> list: if node.ins == _Instruction.RETURN: return [node.arg1] if node.ins == _Instruction.JUMP: @@ -542,7 +543,7 @@ def to_binary(self, fill: bool = False) -> bytes: Returns: A bytes object with the encoding of this ASMap object. """ - bits: List[int] = [] + bits: list[int] = [] def recurse(node: _BinNode) -> None: _CODER_INS.encode(node.ins.value, bits) @@ -582,11 +583,11 @@ def recurse(node: _BinNode) -> None: def from_binary(bindata: bytes) -> Optional["ASMap"]: """Decode an ASMap object from the provided binary encoding.""" - bits: List[int] = [] + bits: list[int] = [] for byte in bindata: bits.extend((byte >> i) & 1 for i in range(8)) - def recurse(bitpos: int) -> Tuple[_BinNode, int]: + def recurse(bitpos: int) -> tuple[_BinNode, int]: insval, bitpos = _CODER_INS.decode(bits, bitpos) ins = _Instruction(insval) if ins == _Instruction.RETURN: @@ -632,7 +633,7 @@ def __eq__(self, other: object) -> bool: def extends(self, req: "ASMap") -> bool: """Determine whether this matches req for all subranges where req is assigned.""" - def recurse(actual: List, require: List) -> bool: + def recurse(actual: list, require: list) -> bool: if len(require) == 1 and require[0] == 0: return True if len(require) == 1: @@ -646,20 +647,20 @@ def recurse(actual: List, require: List) -> bool: #pylint: disable=protected-access return recurse(self._trie, req._trie) - def diff(self, other: "ASMap") -> List[ASNDiff]: + def diff(self, other: "ASMap") -> list[ASNDiff]: """Compute the diff from self to other.""" - prefix: List[bool] = [] - ret: List[ASNDiff] = [] + prefix: list[bool] = [] + ret: list[ASNDiff] = [] - def recurse(old_node: List, new_node: List): + def recurse(old_node: list, new_node: list): if len(old_node) == 1 and len(new_node) == 1: if old_node[0] != new_node[0]: ret.append((list(prefix), old_node[0], new_node[0])) else: - old_left: List = old_node if len(old_node) == 1 else old_node[0] - old_right: List = old_node if len(old_node) == 1 else old_node[1] - new_left: List = new_node if len(new_node) == 1 else new_node[0] - new_right: List = new_node if len(new_node) == 1 else new_node[1] + old_left: list = old_node if 
len(old_node) == 1 else old_node[0] + old_right: list = old_node if len(old_node) == 1 else old_node[1] + new_left: list = new_node if len(new_node) == 1 else new_node[0] + new_right: list = new_node if len(new_node) == 1 else new_node[1] prefix.append(False) recurse(old_left, new_left) prefix[-1] = True @@ -760,7 +761,7 @@ def test_patching(self) -> None: # It starts off being equal to asmap. patched = copy.copy(asmap) # Keep a list of patches performed. - patches: List[ASNEntry] = [] + patches: list[ASNEntry] = [] # Initially there cannot be any difference. self.assertEqual(asmap.diff(patched), []) # Make 5 patches, each building on top of the previous ones. diff --git a/contrib/seeds/makeseeds.py b/contrib/seeds/makeseeds.py index af408c2df53ab..f03c2ab5e808f 100755 --- a/contrib/seeds/makeseeds.py +++ b/contrib/seeds/makeseeds.py @@ -11,7 +11,7 @@ import ipaddress import re import sys -from typing import List, Dict, Union +from typing import Union from asmap import ASMap, net_to_prefix @@ -117,14 +117,14 @@ def parseline(line: str) -> Union[dict, None]: 'sortkey': sortkey, } -def dedup(ips: List[Dict]) -> List[Dict]: +def dedup(ips: list[dict]) -> list[dict]: """ Remove duplicates from `ips` where multiple ips share address and port. """ d = {} for ip in ips: d[ip['ip'],ip['port']] = ip return list(d.values()) -def filtermultiport(ips: List[Dict]) -> List[Dict]: +def filtermultiport(ips: list[dict]) -> list[dict]: """ Filter out hosts with more nodes per IP""" hist = collections.defaultdict(list) for ip in ips: @@ -132,7 +132,7 @@ def filtermultiport(ips: List[Dict]) -> List[Dict]: return [value[0] for (key,value) in list(hist.items()) if len(value)==1] # Based on Greg Maxwell's seed_filter.py -def filterbyasn(asmap: ASMap, ips: List[Dict], max_per_asn: Dict, max_per_net: int) -> List[Dict]: +def filterbyasn(asmap: ASMap, ips: list[dict], max_per_asn: dict, max_per_net: int) -> list[dict]: """ Prunes `ips` by (a) trimming ips to have at most `max_per_net` ips from each net (e.g. ipv4, ipv6); and (b) trimming ips to have at most `max_per_asn` ips from each asn in each net. @@ -143,8 +143,8 @@ def filterbyasn(asmap: ASMap, ips: List[Dict], max_per_asn: Dict, max_per_net: i # Filter IPv46 by ASN, and limit to max_per_net per network result = [] - net_count: Dict[str, int] = collections.defaultdict(int) - asn_count: Dict[int, int] = collections.defaultdict(int) + net_count: dict[str, int] = collections.defaultdict(int) + asn_count: dict[int, int] = collections.defaultdict(int) for i, ip in enumerate(ips_ipv46): if net_count[ip['net']] == max_per_net: @@ -165,9 +165,9 @@ def filterbyasn(asmap: ASMap, ips: List[Dict], max_per_asn: Dict, max_per_net: i result.extend(ips_onion[0:max_per_net]) return result -def ip_stats(ips: List[Dict]) -> str: +def ip_stats(ips: list[dict]) -> str: """ Format and return pretty string from `ips`. 
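As a quick illustration of the `dedup` semantics in the `makeseeds.py` hunk above (hypothetical addresses, assuming the module is importable from `contrib/seeds`):

```python
# dedup keeps one entry per (ip, port) pair; filtermultiport later drops IPs that
# appear with several ports. The addresses below are documentation placeholders.
from makeseeds import dedup

ips = [
    {"ip": "203.0.113.1", "port": 8333},
    {"ip": "203.0.113.1", "port": 8333},  # exact (ip, port) duplicate: collapsed
    {"ip": "203.0.113.1", "port": 8334},  # same ip, different port: kept by dedup
]
assert len(dedup(ips)) == 2
```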
""" - hist: Dict[str, int] = collections.defaultdict(int) + hist: dict[str, int] = collections.defaultdict(int) for ip in ips: if ip is not None: hist[ip['net']] += 1 diff --git a/contrib/signet/miner b/contrib/signet/miner index 61d9f62be7184..e5daf9f993eaa 100755 --- a/contrib/signet/miner +++ b/contrib/signet/miner @@ -30,7 +30,7 @@ logging.basicConfig( SIGNET_HEADER = b"\xec\xc7\xda\xa2" PSBT_SIGNET_BLOCK = b"\xfc\x06signetb" # proprietary PSBT global field holding the block being signed -RE_MULTIMINER = re.compile("^(\d+)(-(\d+))?/(\d+)$") +RE_MULTIMINER = re.compile(r"^(\d+)(-(\d+))?/(\d+)$") def create_coinbase(height, value, spk): cb = CTransaction() diff --git a/contrib/tracing/mempool_monitor.py b/contrib/tracing/mempool_monitor.py index 9d427d4632a46..afb5e603722b1 100755 --- a/contrib/tracing/mempool_monitor.py +++ b/contrib/tracing/mempool_monitor.py @@ -27,7 +27,7 @@ struct added_event { u8 hash[HASH_LENGTH]; - u64 vsize; + s32 vsize; s64 fee; }; @@ -35,7 +35,7 @@ { u8 hash[HASH_LENGTH]; char reason[MAX_REMOVAL_REASON_LENGTH]; - u64 vsize; + s32 vsize; s64 fee; u64 entry_time; }; @@ -49,11 +49,11 @@ struct replaced_event { u8 replaced_hash[HASH_LENGTH]; - u64 replaced_vsize; + s32 replaced_vsize; s64 replaced_fee; u64 replaced_entry_time; u8 replacement_hash[HASH_LENGTH]; - u64 replacement_vsize; + s32 replacement_vsize; s64 replacement_fee; }; diff --git a/contrib/valgrind.supp b/contrib/valgrind.supp index faba1f6ae6cbd..ee91acd5ef4b7 100644 --- a/contrib/valgrind.supp +++ b/contrib/valgrind.supp @@ -89,3 +89,9 @@ ... fun:_ZN5BCLog6Logger12StartLoggingEv } +{ + Suppress https://bugs.kde.org/show_bug.cgi?id=472219 - fixed in Valgrind 3.22. + Memcheck:Param + ppoll(ufds.events) + obj:/lib/ld-musl-aarch64.so.1 +} diff --git a/contrib/verify-binaries/README.md b/contrib/verify-binaries/README.md index c62d760e1a7ee..04d683e69b559 100644 --- a/contrib/verify-binaries/README.md +++ b/contrib/verify-binaries/README.md @@ -17,7 +17,7 @@ must obtain that key for your local GPG installation. You can obtain these keys by - through a browser using a key server (e.g. keyserver.ubuntu.com), - manually using the `gpg --keyserver --recv-keys ` command, or - - you can run the packaged `verify.py ... --import-keys` script to + - you can run the packaged `verify.py --import-keys ...` script to have it automatically retrieve unrecognized keys. 
#### Usage diff --git a/contrib/verify-binaries/verify.py b/contrib/verify-binaries/verify.py index d0749f503f865..12e6e10d8a4de 100755 --- a/contrib/verify-binaries/verify.py +++ b/contrib/verify-binaries/verify.py @@ -122,7 +122,7 @@ def download_with_wget(remote_file, local_file): return result.returncode == 0, result.stdout.decode().rstrip() -def download_lines_with_urllib(url) -> t.Tuple[bool, t.List[str]]: +def download_lines_with_urllib(url) -> tuple[bool, list[str]]: """Get (success, text lines of a file) over HTTP.""" try: return (True, [ @@ -138,7 +138,7 @@ def verify_with_gpg( filename, signature_filename, output_filename: t.Optional[str] = None -) -> t.Tuple[int, str]: +) -> tuple[int, str]: with tempfile.NamedTemporaryFile() as status_file: args = [ 'gpg', '--yes', '--verify', '--verify-options', 'show-primary-uid-only', "--status-file", status_file.name, @@ -177,12 +177,12 @@ def __repr__(self): def parse_gpg_result( - output: t.List[str] -) -> t.Tuple[t.List[SigData], t.List[SigData], t.List[SigData]]: + output: list[str] +) -> tuple[list[SigData], list[SigData], list[SigData]]: """Returns good, unknown, and bad signatures from GPG stdout.""" - good_sigs: t.List[SigData] = [] - unknown_sigs: t.List[SigData] = [] - bad_sigs: t.List[SigData] = [] + good_sigs: list[SigData] = [] + unknown_sigs: list[SigData] = [] + bad_sigs: list[SigData] = [] total_resolved_sigs = 0 # Ensure that all lines we match on include a prefix that prevents malicious input @@ -265,7 +265,7 @@ def files_are_equal(filename1, filename2): def get_files_from_hosts_and_compare( - hosts: t.List[str], path: str, filename: str, require_all: bool = False + hosts: list[str], path: str, filename: str, require_all: bool = False ) -> ReturnCode: """ Retrieve the same file from a number of hosts and ensure they have the same contents. @@ -326,7 +326,7 @@ def join_url(host: str) -> str: return ReturnCode.SUCCESS -def check_multisig(sums_file: str, sigfilename: str, args: argparse.Namespace) -> t.Tuple[int, str, t.List[SigData], t.List[SigData], t.List[SigData]]: +def check_multisig(sums_file: str, sigfilename: str, args: argparse.Namespace) -> tuple[int, str, list[SigData], list[SigData], list[SigData]]: # check signature # # We don't write output to a file because this command will almost certainly @@ -365,8 +365,8 @@ def prompt_yn(prompt) -> bool: def verify_shasums_signature( signature_file_path: str, sums_file_path: str, args: argparse.Namespace -) -> t.Tuple[ - ReturnCode, t.List[SigData], t.List[SigData], t.List[SigData], t.List[SigData] +) -> tuple[ + ReturnCode, list[SigData], list[SigData], list[SigData], list[SigData] ]: min_good_sigs = args.min_good_sigs gpg_allowed_codes = [0, 2] # 2 is returned when untrusted signatures are present. 
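`parse_gpg_result` above consumes gpg's machine-readable status output. The sketch below shows the general shape of that classification using standard gnupg status words (`GOODSIG`, `NO_PUBKEY`, `BADSIG`); the exact keywords and fields verify.py matches may differ from this simplification:

```python
# Rough sketch of sorting gnupg status lines into good / unknown / bad signatures.
def classify_status_lines(lines: list[str]) -> tuple[list[str], list[str], list[str]]:
    good, unknown, bad = [], [], []
    for line in lines:
        if not line.startswith("[GNUPG:] "):
            continue  # only trust lines emitted by gpg itself
        fields = line.split()
        keyword = fields[1]
        if keyword == "GOODSIG":
            good.append(fields[2])      # long key id
        elif keyword == "NO_PUBKEY":
            unknown.append(fields[2])
        elif keyword == "BADSIG":
            bad.append(fields[2])
    return good, unknown, bad
```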
@@ -429,14 +429,14 @@ def verify_shasums_signature( return (ReturnCode.SUCCESS, good_trusted, good_untrusted, unknown, bad) -def parse_sums_file(sums_file_path: str, filename_filter: t.List[str]) -> t.List[t.List[str]]: +def parse_sums_file(sums_file_path: str, filename_filter: list[str]) -> list[list[str]]: # extract hashes/filenames of binaries to verify from hash file; # each line has the following format: " " with open(sums_file_path, 'r', encoding='utf8') as hash_file: return [line.split()[:2] for line in hash_file if len(filename_filter) == 0 or any(f in line for f in filename_filter)] -def verify_binary_hashes(hashes_to_verify: t.List[t.List[str]]) -> t.Tuple[ReturnCode, t.Dict[str, str]]: +def verify_binary_hashes(hashes_to_verify: list[list[str]]) -> tuple[ReturnCode, dict[str, str]]: offending_files = [] files_to_hashes = {} diff --git a/depends/Makefile b/depends/Makefile index 68be151df33dc..3f335ac7ad8af 100644 --- a/depends/Makefile +++ b/depends/Makefile @@ -47,7 +47,7 @@ NO_HARDEN ?= FALLBACK_DOWNLOAD_PATH ?= https://bitcoincore.org/depends-sources C_STANDARD ?= c11 -CXX_STANDARD ?= c++17 +CXX_STANDARD ?= c++20 BUILD = $(shell ./config.guess) HOST ?= $(BUILD) @@ -145,8 +145,8 @@ include packages/packages.mk # 2. Before including packages/*.mk (excluding packages/packages.mk), since # they rely on the build_id variables # -build_id:=$(shell env CC='$(build_CC)' C_STANDARD='$(C_STANDARD)' CXX='$(build_CXX)' CXX_STANDARD='$(CXX_STANDARD)' AR='$(build_AR)' RANLIB='$(build_RANLIB)' STRIP='$(build_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' LTO='$(LTO)' NO_HARDEN='$(NO_HARDEN)' ./gen_id '$(BUILD_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') -$(host_arch)_$(host_os)_id:=$(shell env CC='$(host_CC)' C_STANDARD='$(C_STANDARD)' CXX='$(host_CXX)' CXX_STANDARD='$(CXX_STANDARD)' AR='$(host_AR)' RANLIB='$(host_RANLIB)' STRIP='$(host_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' LTO='$(LTO)' NO_HARDEN='$(NO_HARDEN)' ./gen_id '$(HOST_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') +build_id:=$(shell env CC='$(build_CC)' C_STANDARD='$(C_STANDARD)' CXX='$(build_CXX)' CXX_STANDARD='$(CXX_STANDARD)' AR='$(build_AR)' NM='$(build_NM)' RANLIB='$(build_RANLIB)' STRIP='$(build_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' LTO='$(LTO)' NO_HARDEN='$(NO_HARDEN)' ./gen_id '$(BUILD_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') +$(host_arch)_$(host_os)_id:=$(shell env CC='$(host_CC)' C_STANDARD='$(C_STANDARD)' CXX='$(host_CXX)' CXX_STANDARD='$(CXX_STANDARD)' AR='$(host_AR)' NM='$(host_NM)' RANLIB='$(host_RANLIB)' STRIP='$(host_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' LTO='$(LTO)' NO_HARDEN='$(NO_HARDEN)' ./gen_id '$(HOST_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') boost_packages_$(NO_BOOST) = $(boost_packages) @@ -177,7 +177,7 @@ endif all_packages = $(packages) $(native_packages) -meta_depends = Makefile funcs.mk builders/default.mk hosts/default.mk hosts/$(host_os).mk builders/$(build_os).mk +meta_depends = Makefile config.guess config.sub funcs.mk builders/default.mk hosts/default.mk hosts/$(host_os).mk builders/$(build_os).mk $(host_arch)_$(host_os)_native_binutils?=$($(host_os)_native_binutils) $(host_arch)_$(host_os)_native_toolchain?=$($(host_os)_native_toolchain) @@ -236,7 +236,6 @@ $(host_prefix)/share/config.site : config.site.in $(host_prefix)/.stamp_$(final_ -e 's|@CXXFLAGS@|$(strip $(host_CXXFLAGS) $(host_$(release_type)_CXXFLAGS))|' \ -e 's|@CPPFLAGS@|$(strip $(host_CPPFLAGS)
$(host_$(release_type)_CPPFLAGS))|' \ -e 's|@LDFLAGS@|$(strip $(host_LDFLAGS) $(host_$(release_type)_LDFLAGS))|' \ - -e 's|@allow_host_packages@|$(ALLOW_HOST_PACKAGES)|' \ -e 's|@no_zmq@|$(NO_ZMQ)|' \ -e 's|@no_wallet@|$(NO_WALLET)|' \ -e 's|@no_bdb@|$(NO_BDB)|' \ diff --git a/depends/README.md b/depends/README.md index f7c0fc0a5fc3b..718ee620ebd44 100644 --- a/depends/README.md +++ b/depends/README.md @@ -15,6 +15,7 @@ For example: **Bitcoin Core's `configure` script by default will ignore the depends output.** In order for it to pick up libraries, tools, and settings from the depends build, you must set the `CONFIG_SITE` environment variable to point to a `config.site` settings file. +Make sure that `CONFIG_SITE` is an absolute path. In the above example, a file named `depends/x86_64-w64-mingw32/share/config.site` will be created. To use it during compilation: @@ -47,7 +48,7 @@ The paths are automatically configured and no other options are needed unless ta #### For macOS cross compilation - sudo apt-get install curl bsdmainutils cmake libz-dev python3-setuptools libtinfo5 xorriso + sudo apt-get install curl bsdmainutils cmake zip Note: You must obtain the macOS SDK before proceeding with a cross-compile. Under the depends directory, create a subdirectory named `SDKs`. @@ -97,7 +98,7 @@ The following can be set when running make: `make FOO=bar` - `SDK_PATH`: Path where SDKs can be found (used by macOS) - `FALLBACK_DOWNLOAD_PATH`: If a source file can't be fetched, try here before giving up - `C_STANDARD`: Set the C standard version used. Defaults to `c11`. -- `CXX_STANDARD`: Set the C++ standard version used. Defaults to `c++17`. +- `CXX_STANDARD`: Set the C++ standard version used. Defaults to `c++20`. - `NO_BOOST`: Don't download/build/cache Boost - `NO_LIBEVENT`: Don't download/build/cache Libevent - `NO_ZMQ`: Don't download/build/cache packages needed for enabling ZeroMQ @@ -107,9 +108,6 @@ The following can be set when running make: `make FOO=bar` - `NO_UPNP`: Don't download/build/cache packages needed for enabling UPnP - `NO_NATPMP`: Don't download/build/cache packages needed for enabling NAT-PMP - `NO_USDT`: Don't download/build/cache packages needed for enabling USDT tracepoints -- `ALLOW_HOST_PACKAGES`: Packages that are missed in dependencies (due to `NO_*` option or - build script logic) are searched for among the host system packages using - `pkg-config`. It allows building with packages of other (newer) versions - `MULTIPROCESS`: Build libmultiprocess (experimental, requires CMake) - `DEBUG`: Disable some optimizations and enable more runtime checking - `HOST_ID_SALT`: Optional salt to use when generating host package ids @@ -120,7 +118,7 @@ The following can be set when running make: `make FOO=bar` - `LOG`: Use file-based logging for individual packages. During a package build its log file resides in the `depends` directory, and the log file is printed out automatically in case of build error. After successful build log files are moved along with package archives -- `LTO`: Use LTO when building packages. +- `LTO`: Enable options needed for LTO. Does not add `-flto` related options to *FLAGS. 
- `NO_HARDEN=1`: Don't use hardening options when building packages If some packages are not built, for example `make NO_WALLET=1`, the appropriate diff --git a/depends/builders/darwin.mk b/depends/builders/darwin.mk index 8ed82b276df9c..eb64c97f64780 100644 --- a/depends/builders/darwin.mk +++ b/depends/builders/darwin.mk @@ -11,8 +11,8 @@ build_darwin_SHA256SUM=shasum -a 256 build_darwin_DOWNLOAD=curl --location --fail --connect-timeout $(DOWNLOAD_CONNECT_TIMEOUT) --retry $(DOWNLOAD_RETRIES) -o #darwin host on darwin builder. overrides darwin host preferences. -darwin_CC=$(shell xcrun -f clang) -mmacosx-version-min=$(OSX_MIN_VERSION) -isysroot$(shell xcrun --show-sdk-path) -darwin_CXX:=$(shell xcrun -f clang++) -mmacosx-version-min=$(OSX_MIN_VERSION) -stdlib=libc++ -isysroot$(shell xcrun --show-sdk-path) +darwin_CC=$(shell xcrun -f clang) -isysroot$(shell xcrun --show-sdk-path) +darwin_CXX:=$(shell xcrun -f clang++) -stdlib=libc++ -isysroot$(shell xcrun --show-sdk-path) darwin_AR:=$(shell xcrun -f ar) darwin_RANLIB:=$(shell xcrun -f ranlib) darwin_STRIP:=$(shell xcrun -f strip) diff --git a/depends/config.guess b/depends/config.guess index 69188da73d743..cdfc4392047ce 100755 --- a/depends/config.guess +++ b/depends/config.guess @@ -4,7 +4,7 @@ # shellcheck disable=SC2006,SC2268 # see below for rationale -timestamp='2023-01-01' +timestamp='2023-08-22' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -47,7 +47,7 @@ me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] -Output the configuration name of the system \`$me' is run on. +Output the configuration name of the system '$me' is run on. Options: -h, --help print this help, then exit @@ -66,7 +66,7 @@ This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" -Try \`$me --help' for more information." +Try '$me --help' for more information." # Parse command line while test $# -gt 0 ; do @@ -102,8 +102,8 @@ GUESS= # temporary files to be created and, as you can see below, it is a # headache to deal with in a portable fashion. -# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still -# use `HOST_CC' if defined, but it is deprecated. +# Historically, 'CC_FOR_BUILD' used to be named 'HOST_CC'. We still +# use 'HOST_CC' if defined, but it is deprecated. # Portable tmp directory creation inspired by the Autoconf team. @@ -155,6 +155,9 @@ Linux|GNU|GNU/*) set_cc_for_build cat <<-EOF > "$dummy.c" + #if defined(__ANDROID__) + LIBC=android + #else #include #if defined(__UCLIBC__) LIBC=uclibc @@ -169,6 +172,7 @@ Linux|GNU|GNU/*) LIBC=musl #endif #endif + #endif EOF cc_set_libc=`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` eval "$cc_set_libc" @@ -459,7 +463,7 @@ case $UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION in UNAME_RELEASE=`uname -v` ;; esac - # Japanese Language versions have a version number like `4.1.3-JL'. + # Japanese Language versions have a version number like '4.1.3-JL'. 
SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/-/_/'` GUESS=sparc-sun-sunos$SUN_REL ;; @@ -904,7 +908,7 @@ EOF fi ;; *:FreeBSD:*:*) - UNAME_PROCESSOR=`/usr/bin/uname -p` + UNAME_PROCESSOR=`uname -p` case $UNAME_PROCESSOR in amd64) UNAME_PROCESSOR=x86_64 ;; @@ -976,7 +980,27 @@ EOF GUESS=$UNAME_MACHINE-unknown-minix ;; aarch64:Linux:*:*) - GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + set_cc_for_build + CPU=$UNAME_MACHINE + LIBCABI=$LIBC + if test "$CC_FOR_BUILD" != no_compiler_found; then + ABI=64 + sed 's/^ //' << EOF > "$dummy.c" + #ifdef __ARM_EABI__ + #ifdef __ARM_PCS_VFP + ABI=eabihf + #else + ABI=eabi + #endif + #endif +EOF + cc_set_abi=`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^ABI' | sed 's, ,,g'` + eval "$cc_set_abi" + case $ABI in + eabi | eabihf) CPU=armv8l; LIBCABI=$LIBC$ABI ;; + esac + fi + GUESS=$CPU-unknown-linux-$LIBCABI ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be @@ -1042,6 +1066,15 @@ EOF k1om:Linux:*:*) GUESS=$UNAME_MACHINE-unknown-linux-$LIBC ;; + kvx:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + kvx:cos:*:*) + GUESS=$UNAME_MACHINE-unknown-cos + ;; + kvx:mbr:*:*) + GUESS=$UNAME_MACHINE-unknown-mbr + ;; loongarch32:Linux:*:* | loongarch64:Linux:*:*) GUESS=$UNAME_MACHINE-unknown-linux-$LIBC ;; @@ -1197,7 +1230,7 @@ EOF GUESS=$UNAME_MACHINE-pc-sysv4.2uw$UNAME_VERSION ;; i*86:OS/2:*:*) - # If we were able to find `uname', then EMX Unix compatibility + # If we were able to find 'uname', then EMX Unix compatibility # is probably installed. GUESS=$UNAME_MACHINE-pc-os2-emx ;; @@ -1338,7 +1371,7 @@ EOF GUESS=ns32k-sni-sysv fi ;; - PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + PENTIUM:*:4.0*:*) # Unisys 'ClearPath HMP IX 4000' SVR4/MP effort # says GUESS=i586-unisys-sysv4 ;; diff --git a/depends/config.site.in b/depends/config.site.in index 05c2ccbac104e..29b2a67ed0b0e 100644 --- a/depends/config.site.in +++ b/depends/config.site.in @@ -78,10 +78,6 @@ if test "@host_os@" = darwin; then BREW=no fi -if test -z "$enable_lto" && test -n "@lto@"; then - enable_lto=yes -fi - if test -z "$enable_hardening" && test -n "@no_harden@"; then enable_hardening=no fi @@ -89,9 +85,7 @@ fi PKG_CONFIG="$(which pkg-config) --static" PKG_CONFIG_PATH="${depends_prefix}/share/pkgconfig:${depends_prefix}/lib/pkgconfig" -if test -z "@allow_host_packages@"; then - PKG_CONFIG_LIBDIR="${depends_prefix}/lib/pkgconfig" -fi +PKG_CONFIG_LIBDIR="${depends_prefix}/lib/pkgconfig" CPPFLAGS="-I${depends_prefix}/include/ ${CPPFLAGS}" LDFLAGS="-L${depends_prefix}/lib ${LDFLAGS}" @@ -102,7 +96,6 @@ fi if test -n "@CXX@" -a -z "${CXX}"; then CXX="@CXX@" fi -PYTHONPATH="${depends_prefix}/native/lib/python3/dist-packages${PYTHONPATH:+${PATH_SEPARATOR}}${PYTHONPATH}" if test -n "@AR@"; then AR="@AR@" diff --git a/depends/config.sub b/depends/config.sub index de4259e404797..defe52c0c874b 100755 --- a/depends/config.sub +++ b/depends/config.sub @@ -4,7 +4,7 @@ # shellcheck disable=SC2006,SC2268 # see below for rationale -timestamp='2023-01-21' +timestamp='2023-09-19' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -82,7 +82,7 @@ This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" -Try \`$me --help' for more information." +Try '$me --help' for more information." 
# Parse command line while test $# -gt 0 ; do @@ -130,7 +130,7 @@ IFS=$saved_IFS # Separate into logical components for further validation case $1 in *-*-*-*-*) - echo Invalid configuration \`"$1"\': more than four components >&2 + echo "Invalid configuration '$1': more than four components" >&2 exit 1 ;; *-*-*-*) @@ -145,7 +145,8 @@ case $1 in nto-qnx* | linux-* | uclinux-uclibc* \ | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \ | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \ - | storm-chaos* | os2-emx* | rtmk-nova* | managarm-*) + | storm-chaos* | os2-emx* | rtmk-nova* | managarm-* \ + | windows-* ) basic_machine=$field1 basic_os=$maybe_os ;; @@ -943,7 +944,7 @@ $basic_machine EOF IFS=$saved_IFS ;; - # We use `pc' rather than `unknown' + # We use 'pc' rather than 'unknown' # because (1) that's what they normally are, and # (2) the word "unknown" tends to confuse beginning users. i*86 | x86_64) @@ -1180,7 +1181,7 @@ case $cpu-$vendor in case $cpu in 1750a | 580 \ | a29k \ - | aarch64 | aarch64_be \ + | aarch64 | aarch64_be | aarch64c | arm64ec \ | abacus \ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] \ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] \ @@ -1199,12 +1200,14 @@ case $cpu-$vendor in | d10v | d30v | dlx | dsp16xx \ | e2k | elxsi | epiphany \ | f30[01] | f700 | fido | fr30 | frv | ft32 | fx80 \ + | javascript \ | h8300 | h8500 \ | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ | hexagon \ | i370 | i*86 | i860 | i960 | ia16 | ia64 \ | ip2k | iq2000 \ | k1om \ + | kvx \ | le32 | le64 \ | lm32 \ | loongarch32 | loongarch64 \ @@ -1213,31 +1216,7 @@ case $cpu-$vendor in | m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x \ | m88110 | m88k | maxq | mb | mcore | mep | metag \ | microblaze | microblazeel \ - | mips | mipsbe | mipseb | mipsel | mipsle \ - | mips16 \ - | mips64 | mips64eb | mips64el \ - | mips64octeon | mips64octeonel \ - | mips64orion | mips64orionel \ - | mips64r5900 | mips64r5900el \ - | mips64vr | mips64vrel \ - | mips64vr4100 | mips64vr4100el \ - | mips64vr4300 | mips64vr4300el \ - | mips64vr5000 | mips64vr5000el \ - | mips64vr5900 | mips64vr5900el \ - | mipsisa32 | mipsisa32el \ - | mipsisa32r2 | mipsisa32r2el \ - | mipsisa32r3 | mipsisa32r3el \ - | mipsisa32r5 | mipsisa32r5el \ - | mipsisa32r6 | mipsisa32r6el \ - | mipsisa64 | mipsisa64el \ - | mipsisa64r2 | mipsisa64r2el \ - | mipsisa64r3 | mipsisa64r3el \ - | mipsisa64r5 | mipsisa64r5el \ - | mipsisa64r6 | mipsisa64r6el \ - | mipsisa64sb1 | mipsisa64sb1el \ - | mipsisa64sr71k | mipsisa64sr71kel \ - | mipsr5900 | mipsr5900el \ - | mipstx39 | mipstx39el \ + | mips* \ | mmix \ | mn10200 | mn10300 \ | moxie \ @@ -1285,7 +1264,7 @@ case $cpu-$vendor in ;; *) - echo Invalid configuration \`"$1"\': machine \`"$cpu-$vendor"\' not recognized 1>&2 + echo "Invalid configuration '$1': machine '$cpu-$vendor' not recognized" 1>&2 exit 1 ;; esac @@ -1306,11 +1285,12 @@ esac # Decode manufacturer-specific aliases for certain operating systems. -if test x$basic_os != x +if test x"$basic_os" != x then # First recognize some ad-hoc cases, or perhaps split kernel-os, or else just # set os. +obj= case $basic_os in gnu/linux*) kernel=linux @@ -1510,10 +1490,16 @@ case $os in os=eabi ;; *) - os=elf + os= + obj=elf ;; esac ;; + aout* | coff* | elf* | pe*) + # These are machine code file formats, not OSes + obj=$os + os= + ;; *) # No normalization, but not necessarily accepted, that comes below. ;; @@ -1532,12 +1518,15 @@ else # system, and we'll never get to this point. 
kernel= +obj= case $cpu-$vendor in score-*) - os=elf + os= + obj=elf ;; spu-*) - os=elf + os= + obj=elf ;; *-acorn) os=riscix1.2 @@ -1547,28 +1536,35 @@ case $cpu-$vendor in os=gnu ;; arm*-semi) - os=aout + os= + obj=aout ;; c4x-* | tic4x-*) - os=coff + os= + obj=coff ;; c8051-*) - os=elf + os= + obj=elf ;; clipper-intergraph) os=clix ;; hexagon-*) - os=elf + os= + obj=elf ;; tic54x-*) - os=coff + os= + obj=coff ;; tic55x-*) - os=coff + os= + obj=coff ;; tic6x-*) - os=coff + os= + obj=coff ;; # This must come before the *-dec entry. pdp10-*) @@ -1590,19 +1586,24 @@ case $cpu-$vendor in os=sunos3 ;; m68*-cisco) - os=aout + os= + obj=aout ;; mep-*) - os=elf + os= + obj=elf ;; mips*-cisco) - os=elf + os= + obj=elf ;; mips*-*) - os=elf + os= + obj=elf ;; or32-*) - os=coff + os= + obj=coff ;; *-tti) # must be before sparc entry or we get the wrong os. os=sysv3 @@ -1611,7 +1612,8 @@ case $cpu-$vendor in os=sunos4.1.1 ;; pru-*) - os=elf + os= + obj=elf ;; *-be) os=beos @@ -1692,10 +1694,12 @@ case $cpu-$vendor in os=uxpv ;; *-rom68k) - os=coff + os= + obj=coff ;; *-*bug) - os=coff + os= + obj=coff ;; *-apple) os=macos @@ -1713,7 +1717,8 @@ esac fi -# Now, validate our (potentially fixed-up) OS. +# Now, validate our (potentially fixed-up) individual pieces (OS, OBJ). + case $os in # Sometimes we do "kernel-libc", so those need to count as OSes. musl* | newlib* | relibc* | uclibc*) @@ -1724,6 +1729,9 @@ case $os in # VxWorks passes extra cpu info in the 4th filed. simlinux | simwindows | spe) ;; + # See `case $cpu-$os` validation below + ghcjs) + ;; # Now accept the basic system types. # The portable systems comes first. # Each alternative MUST end in a * to match a version number. @@ -1732,7 +1740,7 @@ case $os in | hpux* | unos* | osf* | luna* | dgux* | auroraux* | solaris* \ | sym* | plan9* | psp* | sim* | xray* | os68k* | v88r* \ | hiux* | abug | nacl* | netware* | windows* \ - | os9* | macos* | osx* | ios* \ + | os9* | macos* | osx* | ios* | tvos* | watchos* \ | mpw* | magic* | mmixware* | mon960* | lnews* \ | amigaos* | amigados* | msdos* | newsos* | unicos* | aof* \ | aos* | aros* | cloudabi* | sortix* | twizzler* \ @@ -1741,11 +1749,11 @@ case $os in | mirbsd* | netbsd* | dicos* | openedition* | ose* \ | bitrig* | openbsd* | secbsd* | solidbsd* | libertybsd* | os108* \ | ekkobsd* | freebsd* | riscix* | lynxos* | os400* \ - | bosx* | nextstep* | cxux* | aout* | elf* | oabi* \ - | ptx* | coff* | ecoff* | winnt* | domain* | vsta* \ + | bosx* | nextstep* | cxux* | oabi* \ + | ptx* | ecoff* | winnt* | domain* | vsta* \ | udi* | lites* | ieee* | go32* | aux* | hcos* \ | chorusrdb* | cegcc* | glidix* | serenity* \ - | cygwin* | msys* | pe* | moss* | proelf* | rtems* \ + | cygwin* | msys* | moss* | proelf* | rtems* \ | midipix* | mingw32* | mingw64* | mint* \ | uxpv* | beos* | mpeix* | udk* | moxiebox* \ | interix* | uwin* | mks* | rhapsody* | darwin* \ @@ -1758,7 +1766,7 @@ case $os in | onefs* | tirtos* | phoenix* | fuchsia* | redox* | bme* \ | midnightbsd* | amdhsa* | unleashed* | emscripten* | wasi* \ | nsk* | powerunix* | genode* | zvmoe* | qnx* | emx* | zephyr* \ - | fiwix* | mlibc* ) + | fiwix* | mlibc* | cos* | mbr* ) ;; # This one is extra strict with allowed versions sco3.2v2 | sco3.2v[4-9]* | sco5v6*) @@ -1766,54 +1774,99 @@ case $os in ;; none) ;; - kernel* ) + kernel* | msvc* ) # Restricted further below ;; + '') + if test x"$obj" = x + then + echo "Invalid configuration '$1': Blank OS only allowed with explicit machine code file format" 1>&2 + fi + ;; + *) + echo "Invalid 
configuration '$1': OS '$os' not recognized" 1>&2 + exit 1 + ;; +esac + +case $obj in + aout* | coff* | elf* | pe*) + ;; + '') + # empty is fine + ;; *) - echo Invalid configuration \`"$1"\': OS \`"$os"\' not recognized 1>&2 + echo "Invalid configuration '$1': Machine code format '$obj' not recognized" 1>&2 + exit 1 + ;; +esac + +# Here we handle the constraint that a (synthetic) cpu and os are +# valid only in combination with each other and nowhere else. +case $cpu-$os in + # The "javascript-unknown-ghcjs" triple is used by GHC; we + # accept it here in order to tolerate that, but reject any + # variations. + javascript-ghcjs) + ;; + javascript-* | *-ghcjs) + echo "Invalid configuration '$1': cpu '$cpu' is not valid with os '$os$obj'" 1>&2 exit 1 ;; esac # As a final step for OS-related things, validate the OS-kernel combination # (given a valid OS), if there is a kernel. -case $kernel-$os in - linux-gnu* | linux-dietlibc* | linux-android* | linux-newlib* \ - | linux-musl* | linux-relibc* | linux-uclibc* | linux-mlibc* ) +case $kernel-$os-$obj in + linux-gnu*- | linux-dietlibc*- | linux-android*- | linux-newlib*- \ + | linux-musl*- | linux-relibc*- | linux-uclibc*- | linux-mlibc*- ) ;; - uclinux-uclibc* ) + uclinux-uclibc*- ) ;; - managarm-mlibc* | managarm-kernel* ) + managarm-mlibc*- | managarm-kernel*- ) ;; - -dietlibc* | -newlib* | -musl* | -relibc* | -uclibc* | -mlibc* ) + windows*-msvc*-) + ;; + -dietlibc*- | -newlib*- | -musl*- | -relibc*- | -uclibc*- | -mlibc*- ) # These are just libc implementations, not actual OSes, and thus # require a kernel. - echo "Invalid configuration \`$1': libc \`$os' needs explicit kernel." 1>&2 + echo "Invalid configuration '$1': libc '$os' needs explicit kernel." 1>&2 exit 1 ;; - -kernel* ) - echo "Invalid configuration \`$1': \`$os' needs explicit kernel." 1>&2 + -kernel*- ) + echo "Invalid configuration '$1': '$os' needs explicit kernel." 1>&2 exit 1 ;; - *-kernel* ) - echo "Invalid configuration \`$1': \`$kernel' does not support \`$os'." 1>&2 + *-kernel*- ) + echo "Invalid configuration '$1': '$kernel' does not support '$os'." 1>&2 exit 1 ;; - kfreebsd*-gnu* | kopensolaris*-gnu*) + *-msvc*- ) + echo "Invalid configuration '$1': '$os' needs 'windows'." 1>&2 + exit 1 ;; - vxworks-simlinux | vxworks-simwindows | vxworks-spe) + kfreebsd*-gnu*- | kopensolaris*-gnu*-) ;; - nto-qnx*) + vxworks-simlinux- | vxworks-simwindows- | vxworks-spe-) ;; - os2-emx) + nto-qnx*-) + ;; + os2-emx-) + ;; + *-eabi*- | *-gnueabi*-) ;; - *-eabi* | *-gnueabi*) + none--*) + # None (no kernel, i.e. freestanding / bare metal), + # can be paired with an machine code file format ;; - -*) + -*-) # Blank kernel with real OS is always fine. ;; - *-*) - echo "Invalid configuration \`$1': Kernel \`$kernel' not known to work with OS \`$os'." 1>&2 + --*) + # Blank kernel and OS with real machine code file format is always fine. + ;; + *-*-*) + echo "Invalid configuration '$1': Kernel '$kernel' not known to work with OS '$os'." 1>&2 exit 1 ;; esac @@ -1896,7 +1949,7 @@ case $vendor in ;; esac -echo "$cpu-$vendor-${kernel:+$kernel-}$os" +echo "$cpu-$vendor${kernel:+-$kernel}${os:+-$os}${obj:+-$obj}" exit # Local variables: diff --git a/depends/description.md b/depends/description.md index 0a6f2e6442190..69ee5bd36ccd8 100644 --- a/depends/description.md +++ b/depends/description.md @@ -6,8 +6,7 @@ There are several features that make it different from most similar systems: In theory, binaries for any target OS/architecture can be created, from a builder running any OS/architecture. 
In practice, build-side tools must be specified when the defaults don't fit, and packages must be amended to work -on new hosts. For now, a build architecture of x86_64 is assumed, either on -Linux or macOS. +on new hosts. ### No reliance on timestamps @@ -28,7 +27,7 @@ etc), and as well as a hash of the same data for each recursive dependency. If any portion of a package's build recipe changes, it will be rebuilt as well as any other package that depends on it. If any of the main makefiles (Makefile, funcs.mk, etc) are changed, all packages will be rebuilt. After building, the -results are cached into a tarball that can be re-used and distributed. +results are cached into a tarball that can be reused and distributed. ### Package build results are (relatively) deterministic. diff --git a/depends/gen_id b/depends/gen_id index 3341310e460a8..8518b4e67442e 100755 --- a/depends/gen_id +++ b/depends/gen_id @@ -1,7 +1,7 @@ #!/usr/bin/env bash # Usage: env [ CC=... ] [ C_STANDARD=...] [ CXX=... ] [CXX_STANDARD=...] \ -# [ AR=... ] [ RANLIB=... ] [ STRIP=... ] [ DEBUG=... ] \ +# [ AR=... ] [ NM=... ] [ RANLIB=... ] [ STRIP=... ] [ DEBUG=... ] \ # [ LTO=... ] [ NO_HARDEN=... ] ./build-id [ID_SALT]... # # Prints to stdout a SHA256 hash representing the current toolset, used by @@ -56,6 +56,11 @@ echo "ZERO_AR_DATE=${ZERO_AR_DATE}" echo "END AR" + echo "BEGIN NM" + bash -c "${NM} --version" + env | grep '^NM_' + echo "END NM" + echo "BEGIN RANLIB" bash -c "${RANLIB} --version" env | grep '^RANLIB_' diff --git a/depends/hosts/android.mk b/depends/hosts/android.mk index db32d02fe84a9..32e3045bc5864 100644 --- a/depends/hosts/android.mk +++ b/depends/hosts/android.mk @@ -11,11 +11,6 @@ endif android_CFLAGS=-std=$(C_STANDARD) android_CXXFLAGS=-std=$(CXX_STANDARD) -ifneq ($(LTO),) -android_CFLAGS += -flto -android_LDFLAGS += -flto -endif - android_AR=$(ANDROID_TOOLCHAIN_BIN)/llvm-ar android_RANLIB=$(ANDROID_TOOLCHAIN_BIN)/llvm-ranlib android_NM=$(ANDROID_TOOLCHAIN_BIN)/llvm-nm diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk index 522a6b17efc15..b94ac7d56fea2 100644 --- a/depends/hosts/darwin.mk +++ b/depends/hosts/darwin.mk @@ -1,8 +1,8 @@ -OSX_MIN_VERSION=10.15 -OSX_SDK_VERSION=11.0 -XCODE_VERSION=12.2 -XCODE_BUILD_ID=12B45b -LD64_VERSION=609 +OSX_MIN_VERSION=11.0 +OSX_SDK_VERSION=14.0 +XCODE_VERSION=15.0 +XCODE_BUILD_ID=15A240d +LD64_VERSION=711 OSX_SDK=$(SDK_PATH)/Xcode-$(XCODE_VERSION)-$(XCODE_BUILD_ID)-extracted-SDK-with-libcxx-headers @@ -19,7 +19,6 @@ clang_prog=$(build_prefix)/bin/clang clangxx_prog=$(clang_prog)++ llvm_config_prog=$(build_prefix)/bin/llvm-config -clang_resource_dir=$(build_prefix)/lib/clang/$(native_clang_version) else # FORCE_USE_SYSTEM_CLANG is non-empty, so we use the clang from the user's # system @@ -37,7 +36,6 @@ clang_prog=$(shell $(SHELL) $(.SHELLFLAGS) "command -v clang") clangxx_prog=$(shell $(SHELL) $(.SHELLFLAGS) "command -v clang++") llvm_config_prog=$(shell $(SHELL) $(.SHELLFLAGS) "command -v llvm-config") -clang_resource_dir=$(shell clang -print-resource-dir) llvm_lib_dir=$(shell $(llvm_config_prog) --libdir) endif @@ -63,62 +61,45 @@ $(foreach TOOL,$(cctools_TOOLS),$(eval darwin_$(TOOL) = $$(build_prefix)/bin/$$( # Explicitly point to our binaries (e.g. cctools) so that they are # ensured to be found and preferred over other possibilities. 
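The `description.md` and `gen_id` hunks above describe how depends caching keys are formed: the toolset hash now also covers `NM`, and each package id covers the package's recipe plus the ids of its recursive dependencies, so any change ripples through to dependent packages. A conceptual Python sketch of that idea (not the real make-level implementation):

```python
# Conceptual sketch of recursive package ids: a package is rebuilt whenever its own
# recipe, the toolset id (CC/CXX/AR/NM/... versions), or any dependency's id changes.
import hashlib

def package_id(recipe_text: str, toolset_id: str, dep_ids: list[str]) -> str:
    h = hashlib.sha256()
    h.update(toolset_id.encode())
    h.update(recipe_text.encode())
    for dep_id in sorted(dep_ids):  # order-independent over dependencies
        h.update(dep_id.encode())
    return h.hexdigest()
```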
# -# -stdlib=libc++ -stdlib++-isystem$(OSX_SDK)/usr/include/c++/v1 +# -isysroot$(OSX_SDK) -nostdlibinc # -# Forces clang to use the libc++ headers from our SDK and completely -# forget about the libc++ headers from the standard directories +# Disable default include paths built into the compiler as well as +# those normally included for libc and libc++. The only path that +# remains implicitly is the clang resource dir. # -# -Xclang -*system \ -# -Xclang -*system \ -# -Xclang -*system ... +# -iwithsysroot / -iframeworkwithsysroot # -# Adds path_a, path_b, and path_c to the bottom of clang's list of -# include search paths. This is used to explicitly specify the list of -# system include search paths and its ordering, rather than rely on -# clang's autodetection routine. This routine has been shown to: -# 1. Fail to pickup libc++ headers in $SYSROOT/usr/include/c++/v1 -# when clang was built manually (see: https://github.com/bitcoin/bitcoin/pull/17919#issuecomment-656785034) -# 2. Fail to pickup C headers in $SYSROOT/usr/include when -# C_INCLUDE_DIRS was specified at configure time (see: https://gist.github.com/dongcarl/5cdc6990b7599e8a5bf6d2a9c70e82f9) +# Adds the desired paths from the SDK # -# Talking directly to cc1 with -Xclang here grants us access to specify -# more granular categories for these system include search paths, and we -# can use the correct categories that these search paths would have been -# placed in if the autodetection routine had worked correctly. (see: -# https://gist.github.com/dongcarl/5cdc6990b7599e8a5bf6d2a9c70e82f9#the-treatment) -# -# Furthermore, it places these search paths after any "non-Xclang" -# specified search paths. This prevents any additional clang options or -# environment variables from coming after or in between these system -# include search paths, as that would be wrong in general but would also -# break #include_next's. +# -platform_version # +# Indicate to the linker the platform, the oldest supported version, +# and the SDK used. 
+ darwin_CC=env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH \ -u OBJC_INCLUDE_PATH -u OBJCPLUS_INCLUDE_PATH -u CPATH \ -u LIBRARY_PATH \ - $(clang_prog) --target=$(host) -mmacosx-version-min=$(OSX_MIN_VERSION) \ - -B$(build_prefix)/bin -mlinker-version=$(LD64_VERSION) \ - -isysroot$(OSX_SDK) \ - -Xclang -internal-externc-isystem -Xclang $(clang_resource_dir)/include \ - -Xclang -internal-externc-isystem -Xclang $(OSX_SDK)/usr/include + $(clang_prog) --target=$(host) \ + -B$(build_prefix)/bin \ + -isysroot$(OSX_SDK) -nostdlibinc \ + -iwithsysroot/usr/include -iframeworkwithsysroot/System/Library/Frameworks + darwin_CXX=env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH \ -u OBJC_INCLUDE_PATH -u OBJCPLUS_INCLUDE_PATH -u CPATH \ -u LIBRARY_PATH \ - $(clangxx_prog) --target=$(host) -mmacosx-version-min=$(OSX_MIN_VERSION) \ - -B$(build_prefix)/bin -mlinker-version=$(LD64_VERSION) \ - -isysroot$(OSX_SDK) \ - -stdlib=libc++ \ - -stdlib++-isystem$(OSX_SDK)/usr/include/c++/v1 \ - -Xclang -internal-externc-isystem -Xclang $(clang_resource_dir)/include \ - -Xclang -internal-externc-isystem -Xclang $(OSX_SDK)/usr/include - -darwin_CFLAGS=-pipe -std=$(C_STANDARD) -darwin_CXXFLAGS=-pipe -std=$(CXX_STANDARD) - -ifneq ($(LTO),) -darwin_CFLAGS += -flto -darwin_CXXFLAGS += -flto -darwin_LDFLAGS += -flto + $(clangxx_prog) --target=$(host) \ + -B$(build_prefix)/bin \ + -isysroot$(OSX_SDK) -nostdlibinc \ + -iwithsysroot/usr/include/c++/v1 \ + -iwithsysroot/usr/include -iframeworkwithsysroot/System/Library/Frameworks + +darwin_CFLAGS=-pipe -std=$(C_STANDARD) -mmacosx-version-min=$(OSX_MIN_VERSION) +darwin_CXXFLAGS=-pipe -std=$(CXX_STANDARD) -mmacosx-version-min=$(OSX_MIN_VERSION) +darwin_LDFLAGS=-Wl,-platform_version,macos,$(OSX_MIN_VERSION),$(OSX_SDK_VERSION) + +ifneq ($(build_os),darwin) +darwin_CFLAGS += -mlinker-version=$(LD64_VERSION) +darwin_CXXFLAGS += -mlinker-version=$(LD64_VERSION) endif darwin_release_CFLAGS=-O2 diff --git a/depends/hosts/freebsd.mk b/depends/hosts/freebsd.mk index 5351d0b90095b..055097b03ddc8 100644 --- a/depends/hosts/freebsd.mk +++ b/depends/hosts/freebsd.mk @@ -1,12 +1,6 @@ freebsd_CFLAGS=-pipe -std=$(C_STANDARD) freebsd_CXXFLAGS=-pipe -std=$(CXX_STANDARD) -ifneq ($(LTO),) -freebsd_CFLAGS += -flto -freebsd_CXXFLAGS += -flto -freebsd_LDFLAGS += -flto -endif - freebsd_release_CFLAGS=-O2 freebsd_release_CXXFLAGS=$(freebsd_release_CFLAGS) diff --git a/depends/hosts/linux.mk b/depends/hosts/linux.mk index 0e2496174e302..8be23be57db03 100644 --- a/depends/hosts/linux.mk +++ b/depends/hosts/linux.mk @@ -2,10 +2,6 @@ linux_CFLAGS=-pipe -std=$(C_STANDARD) linux_CXXFLAGS=-pipe -std=$(CXX_STANDARD) ifneq ($(LTO),) -linux_CFLAGS += -flto -linux_CXXFLAGS += -flto -linux_LDFLAGS += -flto - linux_AR = $(host_toolchain)gcc-ar linux_NM = $(host_toolchain)gcc-nm linux_RANLIB = $(host_toolchain)gcc-ranlib @@ -17,7 +13,7 @@ linux_release_CXXFLAGS=$(linux_release_CFLAGS) linux_debug_CFLAGS=-O1 linux_debug_CXXFLAGS=$(linux_debug_CFLAGS) -linux_debug_CPPFLAGS=-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC -D_LIBCPP_ENABLE_ASSERTIONS=1 +linux_debug_CPPFLAGS=-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC -D_LIBCPP_ENABLE_DEBUG_MODE=1 ifeq (86,$(findstring 86,$(build_arch))) i686_linux_CC=gcc -m32 diff --git a/depends/hosts/mingw32.mk b/depends/hosts/mingw32.mk index fc1cc1afbe5cc..15aa7cd25ad05 100644 --- a/depends/hosts/mingw32.mk +++ b/depends/hosts/mingw32.mk @@ -6,10 +6,6 @@ mingw32_CFLAGS=-pipe -std=$(C_STANDARD) mingw32_CXXFLAGS=-pipe -std=$(CXX_STANDARD) ifneq ($(LTO),) -mingw32_CFLAGS += -flto -mingw32_CXXFLAGS += 
-flto -mingw32_LDFLAGS += -flto - mingw32_AR = $(host_toolchain)gcc-ar mingw32_NM = $(host_toolchain)gcc-nm mingw32_RANLIB = $(host_toolchain)gcc-ranlib diff --git a/depends/hosts/netbsd.mk b/depends/hosts/netbsd.mk index 14121dca20f32..f33b2d2889571 100644 --- a/depends/hosts/netbsd.mk +++ b/depends/hosts/netbsd.mk @@ -2,10 +2,6 @@ netbsd_CFLAGS=-pipe -std=$(C_STANDARD) netbsd_CXXFLAGS=-pipe -std=$(CXX_STANDARD) ifneq ($(LTO),) -netbsd_CFLAGS += -flto -netbsd_CXXFLAGS += -flto -netbsd_LDFLAGS += -flto - netbsd_AR = $(host_toolchain)gcc-ar netbsd_NM = $(host_toolchain)gcc-nm netbsd_RANLIB = $(host_toolchain)gcc-ranlib diff --git a/depends/hosts/openbsd.mk b/depends/hosts/openbsd.mk index d330e94d2ed2d..bdd36dc9b35de 100644 --- a/depends/hosts/openbsd.mk +++ b/depends/hosts/openbsd.mk @@ -1,12 +1,6 @@ openbsd_CFLAGS=-pipe -std=$(C_STANDARD) openbsd_CXXFLAGS=-pipe -std=$(CXX_STANDARD) -ifneq ($(LTO),) -openbsd_CFLAGS += -flto -openbsd_CXXFLAGS += -flto -openbsd_LDFLAGS += -flto -endif - openbsd_release_CFLAGS=-O2 openbsd_release_CXXFLAGS=$(openbsd_release_CFLAGS) diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk index ebc097d686f58..ab43764b38f27 100644 --- a/depends/packages/boost.mk +++ b/depends/packages/boost.mk @@ -3,6 +3,11 @@ $(package)_version=1.81.0 $(package)_download_path=https://boostorg.jfrog.io/artifactory/main/release/$($(package)_version)/source/ $(package)_file_name=boost_$(subst .,_,$($(package)_version)).tar.bz2 $(package)_sha256_hash=71feeed900fbccca04a3b4f2f84a7c217186f28a940ed8b7ed4725986baf99fa +$(package)_patches=process_macos_sdk.patch + +define $(package)_preprocess_cmds + patch -p1 < $($(package)_patch_dir)/process_macos_sdk.patch +endef define $(package)_stage_cmds mkdir -p $($(package)_staging_prefix_dir)/include && \ diff --git a/depends/packages/capnp.mk b/depends/packages/capnp.mk index f4778c1ecdc32..2465c8091bbeb 100644 --- a/depends/packages/capnp.mk +++ b/depends/packages/capnp.mk @@ -4,17 +4,20 @@ $(package)_download_path=$(native_$(package)_download_path) $(package)_download_file=$(native_$(package)_download_file) $(package)_file_name=$(native_$(package)_file_name) $(package)_sha256_hash=$(native_$(package)_sha256_hash) -$(package)_dependencies=native_$(package) +# Hardcode library install path to "lib" to match the PKG_CONFIG_PATH +# setting in depends/config.site.in, which also hardcodes "lib". +# Without this setting, cmake by default would use the OS library +# directory, which might be "lib64" or something else, not "lib", on multiarch systems. define $(package)_set_vars := -$(package)_config_opts := --with-external-capnp -$(package)_config_opts += CAPNP="$$(native_capnp_prefixbin)/capnp" -$(package)_config_opts += CAPNP_CXX="$$(native_capnp_prefixbin)/capnp-c++" -$(package)_config_opts_android := --disable-shared + $(package)_config_opts := -DBUILD_TESTING=OFF + $(package)_config_opts += -DWITH_OPENSSL=OFF + $(package)_config_opts += -DWITH_ZLIB=OFF + $(package)_config_opts += -DCMAKE_INSTALL_LIBDIR=lib/ endef define $(package)_config_cmds - $($(package)_autoconf) + $($(package)_cmake) . 
endef define $(package)_build_cmds @@ -24,3 +27,7 @@ endef define $(package)_stage_cmds $(MAKE) DESTDIR=$($(package)_staging_dir) install endef + +define $(package)_postprocess_cmds + rm -rf lib/pkgconfig +endef diff --git a/depends/packages/libmultiprocess.mk b/depends/packages/libmultiprocess.mk index 765d6493776bb..d237f52dbb237 100644 --- a/depends/packages/libmultiprocess.mk +++ b/depends/packages/libmultiprocess.mk @@ -8,7 +8,13 @@ ifneq ($(host),$(build)) $(package)_dependencies += native_capnp endif +# Hardcode library install path to "lib" to match the PKG_CONFIG_PATH +# setting in depends/config.site.in, which also hardcodes "lib". +# Without this setting, cmake by default would use the OS library +# directory, which might be "lib64" or something else, not "lib", on multiarch systems. define $(package)_set_vars := +$(package)_config_opts += -DCMAKE_INSTALL_LIBDIR=lib/ +$(package)_config_opts += -DCMAKE_POSITION_INDEPENDENT_CODE=ON ifneq ($(host),$(build)) $(package)_config_opts := -DCAPNP_EXECUTABLE="$$(native_capnp_prefixbin)/capnp" $(package)_config_opts += -DCAPNPC_CXX_EXECUTABLE="$$(native_capnp_prefixbin)/capnpc-c++" diff --git a/depends/packages/native_capnp.mk b/depends/packages/native_capnp.mk index ed5a6deee2fdb..484e78d5d906b 100644 --- a/depends/packages/native_capnp.mk +++ b/depends/packages/native_capnp.mk @@ -1,12 +1,18 @@ package=native_capnp -$(package)_version=0.7.0 +$(package)_version=1.0.1 $(package)_download_path=https://capnproto.org/ $(package)_download_file=capnproto-c++-$($(package)_version).tar.gz $(package)_file_name=capnproto-cxx-$($(package)_version).tar.gz -$(package)_sha256_hash=c9a4c0bd88123064d483ab46ecee777f14d933359e23bff6fb4f4dbd28b4cd41 +$(package)_sha256_hash=0f7f4b8a76a2cdb284fddef20de8306450df6dd031a47a15ac95bc43c3358e09 + +define $(package)_set_vars + $(package)_config_opts := -DBUILD_TESTING=OFF + $(package)_config_opts += -DWITH_OPENSSL=OFF + $(package)_config_opts += -DWITH_ZLIB=OFF +endef define $(package)_config_cmds - $($(package)_autoconf) + $($(package)_cmake) . 
endef define $(package)_build_cmds @@ -16,3 +22,7 @@ endef define $(package)_stage_cmds $(MAKE) DESTDIR=$($(package)_staging_dir) install endef + +define $(package)_postprocess_cmds + rm -rf lib/pkgconfig +endef diff --git a/depends/packages/native_cctools.mk b/depends/packages/native_cctools.mk index 03e9002ecd759..3148e51048c5c 100644 --- a/depends/packages/native_cctools.mk +++ b/depends/packages/native_cctools.mk @@ -1,8 +1,8 @@ package=native_cctools -$(package)_version=2ef2e931cf641547eb8a68cfebde61003587c9fd +$(package)_version=c74fafe86076713cb8e6f937af43b6df6da1f42d $(package)_download_path=https://github.com/tpoechtrager/cctools-port/archive $(package)_file_name=$($(package)_version).tar.gz -$(package)_sha256_hash=6b73269efdf5c58a070e7357b66ee760501388549d6a12b423723f45888b074b +$(package)_sha256_hash=e2c1588d505a69c32e079f4e616e0f117d5478429040e394f624f43f2796e6bc $(package)_build_subdir=cctools $(package)_dependencies=native_libtapi @@ -17,13 +17,9 @@ endef ifneq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) define $(package)_preprocess_cmds mkdir -p $($(package)_staging_prefix_dir)/lib && \ - cp $(llvm_lib_dir)/libLTO.so $($(package)_staging_prefix_dir)/lib/ && \ - cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub cctools + cp $(llvm_lib_dir)/libLTO.so $($(package)_staging_prefix_dir)/lib/ endef else -define $(package)_preprocess_cmds - cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub cctools -endef endif define $(package)_config_cmds diff --git a/depends/packages/native_ds_store.mk b/depends/packages/native_ds_store.mk deleted file mode 100644 index 51a95f48ef7b1..0000000000000 --- a/depends/packages/native_ds_store.mk +++ /dev/null @@ -1,15 +0,0 @@ -package=native_ds_store -$(package)_version=1.3.0 -$(package)_download_path=https://github.com/dmgbuild/ds_store/archive/ -$(package)_file_name=v$($(package)_version).tar.gz -$(package)_sha256_hash=76b3280cd4e19e5179defa23fb594a9dd32643b0c80d774bd3108361d94fb46d -$(package)_install_libdir=$(build_prefix)/lib/python3/dist-packages - -define $(package)_build_cmds - python3 setup.py build -endef - -define $(package)_stage_cmds - mkdir -p $($(package)_install_libdir) && \ - python3 setup.py install --root=$($(package)_staging_dir) --prefix=$(build_prefix) --install-lib=$($(package)_install_libdir) -endef diff --git a/depends/packages/native_libmultiprocess.mk b/depends/packages/native_libmultiprocess.mk index e647afba5f35c..946e885354aac 100644 --- a/depends/packages/native_libmultiprocess.mk +++ b/depends/packages/native_libmultiprocess.mk @@ -1,8 +1,8 @@ package=native_libmultiprocess -$(package)_version=1af83d15239ccfa7e47b8764029320953dd7fdf1 +$(package)_version=414542f81e0997354b45b8ade13ca144a3e35ff1 $(package)_download_path=https://github.com/chaincodelabs/libmultiprocess/archive $(package)_file_name=$($(package)_version).tar.gz -$(package)_sha256_hash=e5587d3feedc7f8473f178a89b94163a11076629825d664964799bbbd5844da5 +$(package)_sha256_hash=8542dbaf8c4fce8fd7af6929f5dc9b34dffa51c43e9ee360e93ee0f34b180bc2 $(package)_dependencies=native_capnp define $(package)_config_cmds diff --git a/depends/packages/native_libtapi.mk b/depends/packages/native_libtapi.mk index 052bb2368933a..fb5ab0b4dccc6 100644 --- a/depends/packages/native_libtapi.mk +++ b/depends/packages/native_libtapi.mk @@ -1,13 +1,18 @@ package=native_libtapi -$(package)_version=664b8414f89612f2dfd35a9b679c345aa5389026 +$(package)_version=eb33a59f2e30ff9724dc1ea8bee8b5229b0557c9 $(package)_download_path=https://github.com/tpoechtrager/apple-libtapi/archive 
$(package)_file_name=$($(package)_version).tar.gz -$(package)_sha256_hash=62e419c12d1c9fad67cc1cd523132bc00db050998337c734c15bc8d73cc02b61 +$(package)_sha256_hash=d4d46c64622f13d6938cecf989046d9561011bb59e8ee835f8f39825d67f578f +$(package)_patches=disable_zlib.patch ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) -$(package)_dependencies=native_clang +$(package)_dependencies=native_llvm endif +define $(package)_preprocess_cmds + patch -p1 < $($(package)_patch_dir)/disable_zlib.patch +endef + define $(package)_build_cmds CC=$(clang_prog) CXX=$(clangxx_prog) INSTALLPREFIX=$($(package)_staging_prefix_dir) ./build.sh endef diff --git a/depends/packages/native_clang.mk b/depends/packages/native_llvm.mk similarity index 67% rename from depends/packages/native_clang.mk rename to depends/packages/native_llvm.mk index f2712294ab20e..09994eb01296a 100644 --- a/depends/packages/native_clang.mk +++ b/depends/packages/native_llvm.mk @@ -1,20 +1,17 @@ -package=native_clang -$(package)_version=10.0.1 +package=native_llvm +$(package)_version=17.0.6 +$(package)_major_version=$(firstword $(subst ., ,$($(package)_version))) $(package)_download_path=https://github.com/llvm/llvm-project/releases/download/llvmorg-$($(package)_version) ifneq (,$(findstring aarch64,$(BUILD))) $(package)_file_name=clang+llvm-$($(package)_version)-aarch64-linux-gnu.tar.xz -$(package)_sha256_hash=90dc69a4758ca15cd0ffa45d07fbf5bf4309d47d2c7745a9f0735ecffde9c31f +$(package)_sha256_hash=6dd62762285326f223f40b8e4f2864b5c372de3f7de0731cb7cd55ca5287b75a else -$(package)_file_name=clang+llvm-$($(package)_version)-x86_64-linux-gnu-ubuntu-16.04.tar.xz -$(package)_sha256_hash=48b83ef827ac2c213d5b64f5ad7ed082c8bcb712b46644e0dc5045c6f462c231 +$(package)_file_name=clang+llvm-$($(package)_version)-x86_64-linux-gnu-ubuntu-22.04.tar.xz +$(package)_sha256_hash=884ee67d647d77e58740c1e645649e29ae9e8a6fe87c1376be0f3a30f3cc9ab3 endif -define $(package)_preprocess_cmds - rm -f $($(package)_extract_dir)/lib/libc++abi.so* -endef - define $(package)_stage_cmds - mkdir -p $($(package)_staging_prefix_dir)/lib/clang/$($(package)_version)/include && \ + mkdir -p $($(package)_staging_prefix_dir)/lib/clang/$($(package)_major_version)/include && \ mkdir -p $($(package)_staging_prefix_dir)/bin && \ mkdir -p $($(package)_staging_prefix_dir)/include/llvm-c && \ cp bin/clang $($(package)_staging_prefix_dir)/bin/ && \ @@ -24,5 +21,5 @@ define $(package)_stage_cmds cp include/llvm-c/ExternC.h $($(package)_staging_prefix_dir)/include/llvm-c && \ cp include/llvm-c/lto.h $($(package)_staging_prefix_dir)/include/llvm-c && \ cp lib/libLTO.so $($(package)_staging_prefix_dir)/lib/ && \ - cp -r lib/clang/$($(package)_version)/include/* $($(package)_staging_prefix_dir)/lib/clang/$($(package)_version)/include/ + cp -r lib/clang/$($(package)_major_version)/include/* $($(package)_staging_prefix_dir)/lib/clang/$($(package)_major_version)/include/ endef diff --git a/depends/packages/native_mac_alias.mk b/depends/packages/native_mac_alias.mk deleted file mode 100644 index ddd631186edf7..0000000000000 --- a/depends/packages/native_mac_alias.mk +++ /dev/null @@ -1,15 +0,0 @@ -package=native_mac_alias -$(package)_version=2.2.0 -$(package)_download_path=https://github.com/dmgbuild/mac_alias/archive/ -$(package)_file_name=v$($(package)_version).tar.gz -$(package)_sha256_hash=421e6d7586d1f155c7db3e7da01ca0dacc9649a509a253ad7077b70174426499 -$(package)_install_libdir=$(build_prefix)/lib/python3/dist-packages - -define $(package)_build_cmds - python3 setup.py build -endef - -define 
$(package)_stage_cmds - mkdir -p $($(package)_install_libdir) && \ - python3 setup.py install --root=$($(package)_staging_dir) --prefix=$(build_prefix) --install-lib=$($(package)_install_libdir) -endef diff --git a/depends/packages/packages.mk b/depends/packages/packages.mk index bb9476f9b6b02..541f56a28498d 100644 --- a/depends/packages/packages.mk +++ b/depends/packages/packages.mk @@ -17,13 +17,13 @@ multiprocess_native_packages = native_libmultiprocess native_capnp usdt_linux_packages=systemtap -darwin_native_packages = native_ds_store native_mac_alias +darwin_native_packages = ifneq ($(build_os),darwin) darwin_native_packages += native_cctools native_libtapi ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) -darwin_native_packages+= native_clang +darwin_native_packages+= native_llvm endif endif diff --git a/depends/packages/systemtap.mk b/depends/packages/systemtap.mk index 541ebeee01408..c912e18c31e77 100644 --- a/depends/packages/systemtap.mk +++ b/depends/packages/systemtap.mk @@ -3,11 +3,10 @@ $(package)_version=4.8 $(package)_download_path=https://sourceware.org/ftp/systemtap/releases/ $(package)_file_name=$(package)-$($(package)_version).tar.gz $(package)_sha256_hash=cbd50a4eba5b261394dc454c12448ddec73e55e6742fda7f508f9fbc1331c223 -$(package)_patches=remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch fix_variadic_warning.patch +$(package)_patches=remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch define $(package)_preprocess_cmds patch -p1 < $($(package)_patch_dir)/remove_SDT_ASM_SECTION_AUTOGROUP_SUPPORT_check.patch && \ - patch -p1 < $($(package)_patch_dir)/fix_variadic_warning.patch && \ mkdir -p $($(package)_staging_prefix_dir)/include/sys && \ cp includes/sys/sdt.h $($(package)_staging_prefix_dir)/include/sys/sdt.h endef diff --git a/depends/packages/xcb_proto.mk b/depends/packages/xcb_proto.mk index 9be822506dbc9..6e1c5a10a87ba 100644 --- a/depends/packages/xcb_proto.mk +++ b/depends/packages/xcb_proto.mk @@ -1,8 +1,8 @@ package=xcb_proto -$(package)_version=1.14.1 +$(package)_version=1.15.2 $(package)_download_path=https://xorg.freedesktop.org/archive/individual/proto $(package)_file_name=xcb-proto-$($(package)_version).tar.xz -$(package)_sha256_hash=f04add9a972ac334ea11d9d7eb4fc7f8883835da3e4859c9afa971efdf57fcc3 +$(package)_sha256_hash=7072beb1f680a2fe3f9e535b797c146d22528990c72f63ddb49d2f350a3653ed define $(package)_config_cmds $($(package)_autoconf) diff --git a/depends/packages/zeromq.mk b/depends/packages/zeromq.mk index d7152327934fb..cc78999dbbeb9 100644 --- a/depends/packages/zeromq.mk +++ b/depends/packages/zeromq.mk @@ -1,9 +1,9 @@ package=zeromq -$(package)_version=4.3.4 +$(package)_version=4.3.5 $(package)_download_path=https://github.com/zeromq/libzmq/releases/download/v$($(package)_version)/ $(package)_file_name=$(package)-$($(package)_version).tar.gz -$(package)_sha256_hash=c593001a89f5a85dd2ddf564805deb860e02471171b3f204944857336295c3e5 -$(package)_patches=remove_libstd_link.patch netbsd_kevent_void.patch +$(package)_sha256_hash=6653ef5910f17954861fe72332e68b03ca6e4d9c7160eb3a8de5a5a913bfab43 +$(package)_patches=remove_libstd_link.patch define $(package)_set_vars $(package)_config_opts = --without-docs --disable-shared --disable-valgrind @@ -19,8 +19,7 @@ define $(package)_set_vars endef define $(package)_preprocess_cmds - patch -p1 < $($(package)_patch_dir)/remove_libstd_link.patch && \ - patch -p1 < $($(package)_patch_dir)/netbsd_kevent_void.patch + patch -p1 < $($(package)_patch_dir)/remove_libstd_link.patch endef define $(package)_config_cmds diff --git 
a/depends/patches/boost/process_macos_sdk.patch b/depends/patches/boost/process_macos_sdk.patch new file mode 100644 index 0000000000000..ebc556d97270e --- /dev/null +++ b/depends/patches/boost/process_macos_sdk.patch @@ -0,0 +1,27 @@ +Fix Boost Process compilation with macOS 14 SDK. +Can be dropped with Boost 1.84.0. +https://github.com/boostorg/process/pull/343. +https://github.com/boostorg/process/issues/342. + +diff --git a/boost/process/detail/posix/handles.hpp b/boost/process/detail/posix/handles.hpp +index cd9e1ce5a..304e77b1c 100644 +--- a/boost/process/detail/posix/handles.hpp ++++ b/boost/process/detail/posix/handles.hpp +@@ -33,7 +33,7 @@ inline std::vector get_handles(std::error_code & ec) + else + ec.clear(); + +- auto my_fd = ::dirfd(dir.get()); ++ auto my_fd = dirfd(dir.get()); + + struct ::dirent * ent_p; + +@@ -117,7 +117,7 @@ struct limit_handles_ : handler_base_ext + return; + } + +- auto my_fd = ::dirfd(dir); ++ auto my_fd = dirfd(dir); + struct ::dirent * ent_p; + + while ((ent_p = readdir(dir)) != nullptr) diff --git a/depends/patches/native_libtapi/disable_zlib.patch b/depends/patches/native_libtapi/disable_zlib.patch new file mode 100644 index 0000000000000..6c7691214aa84 --- /dev/null +++ b/depends/patches/native_libtapi/disable_zlib.patch @@ -0,0 +1,17 @@ +build: disable zlib + +This isn't needed, and causes issues when clang-tblgen +is built but tries to reach for a system libz.so. + +diff --git a/build.sh b/build.sh +index e25d2f732..ec8422621 100755 +--- a/build.sh ++++ b/build.sh +@@ -66,6 +66,7 @@ cmake ../src/llvm \ + -DCMAKE_INSTALL_PREFIX=$INSTALLPREFIX \ + -DTAPI_REPOSITORY_STRING=$TAPI_VERSION \ + -DTAPI_FULL_VERSION=$TAPI_VERSION \ ++ -DLLVM_ENABLE_ZLIB=OFF \ + $CMAKE_EXTRA_ARGS + + echo "" diff --git a/depends/patches/qt/dont_hardcode_pwd.patch b/depends/patches/qt/dont_hardcode_pwd.patch deleted file mode 100644 index a74e9cb09872b..0000000000000 --- a/depends/patches/qt/dont_hardcode_pwd.patch +++ /dev/null @@ -1,27 +0,0 @@ -commit 0e953866fc4672486e29e1ba6d83b4207e7b2f0b -Author: fanquake -Date: Tue Aug 18 15:09:06 2020 +0800 - - Don't hardcode pwd path - - Let a man use his builtins if he wants to! Also, removes the unnecessary - assumption that pwd lives under /bin/pwd. - - See #15581.
- -diff --git a/qtbase/configure b/qtbase/configure -index 08b49a8d..faea5b55 100755 ---- a/qtbase/configure -+++ b/qtbase/configure -@@ -36,9 +36,9 @@ - relconf=`basename $0` - # the directory of this script is the "source tree" - relpath=`dirname $0` --relpath=`(cd "$relpath"; /bin/pwd)` -+relpath=`(cd "$relpath"; pwd)` - # the current directory is the "build tree" or "object tree" --outpath=`/bin/pwd` -+outpath=`pwd` - - WHICH="which" - diff --git a/depends/patches/qt/dont_hardcode_x86_64.patch b/depends/patches/qt/dont_hardcode_x86_64.patch deleted file mode 100644 index a66426877ad78..0000000000000 --- a/depends/patches/qt/dont_hardcode_x86_64.patch +++ /dev/null @@ -1,119 +0,0 @@ -macOS: Don't hard-code x86_64 as the architecture when using qmake - -Upstream commit: - - Qt 6.1: 9082cc8e8d5a6441dabe5e7a95bc0cd9085b95fe - -For other Qt branches see -https://codereview.qt-project.org/q/I70db7e4c27f0d3da5d0af33cb491d72c312d3fa8 - - ---- old/qtbase/configure.json -+++ new/qtbase/configure.json -@@ -244,11 +244,18 @@ - - "testTypeDependencies": { - "linkerSupportsFlag": [ "use_bfd_linker", "use_gold_linker", "use_lld_linker" ], -- "verifySpec": [ "shared", "use_bfd_linker", "use_gold_linker", "use_lld_linker", "compiler-flags", "qmakeargs", "commit" ], -+ "verifySpec": [ -+ "shared", -+ "use_bfd_linker", "use_gold_linker", "use_lld_linker", -+ "compiler-flags", "qmakeargs", -+ "simulator_and_device", -+ "thread", -+ "commit" ], - "compile": [ "verifyspec" ], - "detectPkgConfig": [ "cross_compile", "machineTuple" ], - "library": [ "pkg-config", "compiler-flags" ], -- "getPkgConfigVariable": [ "pkg-config" ] -+ "getPkgConfigVariable": [ "pkg-config" ], -+ "architecture" : [ "verifyspec" ] - }, - - "testTypeAliases": { -@@ -762,7 +769,7 @@ - }, - "architecture": { - "label": "Architecture", -- "output": [ "architecture" ] -+ "output": [ "architecture", "commitConfig" ] - }, - "pkg-config": { - "label": "Using pkg-config", -diff --git a/configure.pri b/configure.pri -index 49755f7abfd..8be9b10d7d4 100644 ---- old/qtbase/configure.pri -+++ new/qtbase/configure.pri -@@ -662,6 +662,13 @@ defineTest(qtConfOutput_commitOptions) { - write_file($$QT_BUILD_TREE/mkspecs/qdevice.pri, $${currentConfig}.output.devicePro)|error() - } - -+# Output is written after configuring each Qt module, -+# but some tests within a module might depend on the -+# configuration output of previous tests. -+defineTest(qtConfOutput_commitConfig) { -+ qtConfProcessOutput() -+} -+ - # type (empty or 'host'), option name, default value - defineTest(processQtPath) { - out_var = config.rel_input.$${2} -diff --git a/mkspecs/common/macx.conf b/mkspecs/common/macx.conf -index d16b77acb8e..4ba0a8eaa36 100644 ---- old/qtbase/mkspecs/common/macx.conf -+++ new/qtbase/mkspecs/common/macx.conf -@@ -6,7 +6,6 @@ QMAKE_PLATFORM += macos osx macx - QMAKE_MAC_SDK = macosx - - QMAKE_MACOSX_DEPLOYMENT_TARGET = 10.13 --QMAKE_APPLE_DEVICE_ARCHS = x86_64 - - # Should be 10.15, but as long as the CI builds with - # older SDKs we have to keep this. -diff --git a/mkspecs/features/mac/default_post.prf b/mkspecs/features/mac/default_post.prf -index 92a9112bca6..d888731ec8d 100644 ---- old/qtbase/mkspecs/features/mac/default_post.prf -+++ new/qtbase/mkspecs/features/mac/default_post.prf -@@ -95,6 +95,11 @@ app_extension_api_only { - QMAKE_LFLAGS += $$QMAKE_CFLAGS_APPLICATION_EXTENSION - } - -+# Non-universal builds do not set QMAKE_APPLE_DEVICE_ARCHS, -+# so we pick it up from what the arch test resolved instead. 
-+isEmpty(QMAKE_APPLE_DEVICE_ARCHS): \ -+ QMAKE_APPLE_DEVICE_ARCHS = $$QT_ARCH -+ - macx-xcode { - qmake_pkginfo_typeinfo.name = QMAKE_PKGINFO_TYPEINFO - !isEmpty(QMAKE_PKGINFO_TYPEINFO): \ -@@ -150,9 +155,6 @@ macx-xcode { - simulator: VALID_SIMULATOR_ARCHS = $$QMAKE_APPLE_SIMULATOR_ARCHS - VALID_ARCHS = $$VALID_DEVICE_ARCHS $$VALID_SIMULATOR_ARCHS - -- isEmpty(VALID_ARCHS): \ -- error("QMAKE_APPLE_DEVICE_ARCHS or QMAKE_APPLE_SIMULATOR_ARCHS must contain at least one architecture") -- - single_arch: VALID_ARCHS = $$first(VALID_ARCHS) - - ACTIVE_ARCHS = $(filter $(EXPORT_VALID_ARCHS), $(ARCHS)) -diff --git a/mkspecs/features/toolchain.prf b/mkspecs/features/toolchain.prf -index efbe7c1e55b..8add6dc8043 100644 ---- old/qtbase/mkspecs/features/toolchain.prf -+++ new/qtbase/mkspecs/features/toolchain.prf -@@ -182,9 +182,14 @@ isEmpty($${target_prefix}.INCDIRS) { - # UIKit simulator platforms will see the device SDK's sysroot in - # QMAKE_DEFAULT_*DIRS, because they're handled in a single build pass. - darwin { -- # Clang doesn't pick up the architecture from the sysroot, and will -- # default to the host architecture, so we need to manually set it. -- cxx_flags += -arch $$QMAKE_APPLE_DEVICE_ARCHS -+ uikit { -+ # Clang doesn't automatically pick up the architecture, just because -+ # we're passing the iOS sysroot below, and we will end up building the -+ # test for the host architecture, resulting in linker errors when -+ # linking against the iOS libraries. We work around this by passing -+ # the architecture explicitly. -+ cxx_flags += -arch $$first(QMAKE_APPLE_DEVICE_ARCHS) -+ } - - uikit:macx-xcode: \ - cxx_flags += -isysroot $$sdk_path_device.value diff --git a/depends/patches/qt/duplicate_lcqpafonts.patch b/depends/patches/qt/duplicate_lcqpafonts.patch deleted file mode 100644 index c460b51dcff67..0000000000000 --- a/depends/patches/qt/duplicate_lcqpafonts.patch +++ /dev/null @@ -1,104 +0,0 @@ -QtGui: Fix duplication of logging category lcQpaFonts - -Move it to qplatformfontdatabase.h. 
- -Upstream commit: - - Qt 6.0: ab01885e48873fb2ad71841a3f1627fe4d9cd835 - ---- a/qtbase/src/gui/text/qplatformfontdatabase.cpp -+++ b/qtbase/src/gui/text/qplatformfontdatabase.cpp -@@ -52,6 +52,8 @@ - - QT_BEGIN_NAMESPACE - -+Q_LOGGING_CATEGORY(lcQpaFonts, "qt.qpa.fonts") -+ - void qt_registerFont(const QString &familyname, const QString &stylename, - const QString &foundryname, int weight, - QFont::Style style, int stretch, bool antialiased, - ---- a/qtbase/src/gui/text/qplatformfontdatabase.h -+++ b/qtbase/src/gui/text/qplatformfontdatabase.h -@@ -50,6 +50,7 @@ - // - - #include -+#include - #include - #include - #include -@@ -62,6 +63,7 @@ - - QT_BEGIN_NAMESPACE - -+Q_DECLARE_LOGGING_CATEGORY(lcQpaFonts) - - class QWritingSystemsPrivate; - - ---- a/qtbase/src/platformsupport/fontdatabases/mac/qfontengine_coretext.mm -+++ b/qtbase/src/platformsupport/fontdatabases/mac/qfontengine_coretext.mm -@@ -86,8 +86,6 @@ - - QT_BEGIN_NAMESPACE - --Q_LOGGING_CATEGORY(lcQpaFonts, "qt.qpa.fonts") -- - static float SYNTHETIC_ITALIC_SKEW = std::tan(14.f * std::acos(0.f) / 90.f); - - bool QCoreTextFontEngine::ct_getSfntTable(void *user_data, uint tag, uchar *buffer, uint *length) - ---- a/qtbase/src/platformsupport/fontdatabases/mac/qfontengine_coretext_p.h -+++ b/qtbase/src/platformsupport/fontdatabases/mac/qfontengine_coretext_p.h -@@ -64,8 +64,6 @@ - - QT_BEGIN_NAMESPACE - --Q_DECLARE_LOGGING_CATEGORY(lcQpaFonts) -- - class QCoreTextFontEngine : public QFontEngine - { - Q_GADGET - ---- a/qtbase/src/platformsupport/fontdatabases/windows/qwindowsfontdatabase.cpp -+++ b/qtbase/src/platformsupport/fontdatabases/windows/qwindowsfontdatabase.cpp -@@ -68,8 +68,6 @@ - - QT_BEGIN_NAMESPACE - --Q_LOGGING_CATEGORY(lcQpaFonts, "qt.qpa.fonts") -- - #ifndef QT_NO_DIRECTWRITE - // ### fixme: Consider direct linking of dwrite.dll once Windows Vista pre SP2 is dropped (QTBUG-49711) - - ---- a/qtbase/src/platformsupport/fontdatabases/windows/qwindowsfontdatabase_p.h -+++ b/qtbase/src/platformsupport/fontdatabases/windows/qwindowsfontdatabase_p.h -@@ -63,8 +63,6 @@ - - QT_BEGIN_NAMESPACE - --Q_DECLARE_LOGGING_CATEGORY(lcQpaFonts) -- - class QWindowsFontEngineData - { - Q_DISABLE_COPY_MOVE(QWindowsFontEngineData) - ---- a/qtbase/src/platformsupport/themes/genericunix/qgenericunixthemes.cpp -+++ b/qtbase/src/platformsupport/themes/genericunix/qgenericunixthemes.cpp -@@ -40,6 +40,7 @@ - #include "qgenericunixthemes_p.h" - - #include "qpa/qplatformtheme_p.h" -+#include "qpa/qplatformfontdatabase.h" - - #include - #include -@@ -76,7 +77,6 @@ - QT_BEGIN_NAMESPACE - - Q_DECLARE_LOGGING_CATEGORY(qLcTray) --Q_LOGGING_CATEGORY(lcQpaFonts, "qt.qpa.fonts") - - ResourceHelper::ResourceHelper() - { diff --git a/depends/patches/qt/fast_fixed_dtoa_no_optimize.patch b/depends/patches/qt/fast_fixed_dtoa_no_optimize.patch deleted file mode 100644 index d4d6539f56dc4..0000000000000 --- a/depends/patches/qt/fast_fixed_dtoa_no_optimize.patch +++ /dev/null @@ -1,20 +0,0 @@ -Modify the optimisation flags for FastFixedDtoa. -This fixes a non-determinism issue in the asm produced for -this function when cross-compiling on x86_64 and aarch64 for -the arm-linux-gnueabihf HOST. - ---- a/qtbase/src/3rdparty/double-conversion/fixed-dtoa.h -+++ b/qtbase/src/3rdparty/double-conversion/fixed-dtoa.h -@@ -48,9 +48,12 @@ namespace double_conversion { - // - // This method only works for some parameters. If it can't handle the input it - // returns false. The output is null-terminated when the function succeeds. 
-+#pragma GCC push_options -+#pragma GCC optimize ("-O1") - bool FastFixedDtoa(double v, int fractional_count, - Vector buffer, int* length, int* decimal_point); - -+#pragma GCC pop_options - } // namespace double_conversion - - #endif // DOUBLE_CONVERSION_FIXED_DTOA_H_ diff --git a/depends/patches/qt/fix_android_jni_static.patch b/depends/patches/qt/fix_android_jni_static.patch deleted file mode 100644 index 936b82e1522e3..0000000000000 --- a/depends/patches/qt/fix_android_jni_static.patch +++ /dev/null @@ -1,18 +0,0 @@ ---- old/qtbase/src/plugins/platforms/android/androidjnimain.cpp -+++ new/qtbase/src/plugins/platforms/android/androidjnimain.cpp -@@ -943,6 +943,14 @@ Q_DECL_EXPORT jint JNICALL JNI_OnLoad(JavaVM *vm, void */*reserved*/) - __android_log_print(ANDROID_LOG_FATAL, "Qt", "registerNatives failed"); - return -1; - } -+ -+ const jint ret = QT_PREPEND_NAMESPACE(QtAndroidPrivate::initJNI(vm, env)); -+ if (ret != 0) -+ { -+ __android_log_print(ANDROID_LOG_FATAL, "Qt", "initJNI failed"); -+ return ret; -+ } -+ - QWindowSystemInterfacePrivate::TabletEvent::setPlatformSynthesizesMouse(false); - - m_javaVM = vm; - diff --git a/depends/patches/qt/fix_montery_include.patch b/depends/patches/qt/fix_montery_include.patch deleted file mode 100644 index 38b700addfe7a..0000000000000 --- a/depends/patches/qt/fix_montery_include.patch +++ /dev/null @@ -1,21 +0,0 @@ -From dece6f5840463ae2ddf927d65eb1b3680e34a547 -From: Øystein Heskestad -Date: Wed, 27 Oct 2021 13:07:46 +0200 -Subject: [PATCH] Add missing macOS header file that was indirectly included before - -See: https://bugreports.qt.io/browse/QTBUG-97855 - -Upstream Commits: - - Qt 6.2: c884bf138a21dd7320e35cef34d24e22e74d7ce0 - -diff --git a/qtbase/src/plugins/platforms/cocoa/qiosurfacegraphicsbuffer.h b/qtbase/src/plugins/platforms/cocoa/qiosurfacegraphicsbuffer.h -index e070ba97..07c75b04 100644 ---- a/qtbase/src/plugins/platforms/cocoa/qiosurfacegraphicsbuffer.h -+++ b/qtbase/src/plugins/platforms/cocoa/qiosurfacegraphicsbuffer.h -@@ -40,6 +40,7 @@ - #ifndef QIOSURFACEGRAPHICSBUFFER_H - #define QIOSURFACEGRAPHICSBUFFER_H - -+#include - #include - #include diff --git a/depends/patches/qt/fix_qt_pkgconfig.patch b/depends/patches/qt/fix_qt_pkgconfig.patch deleted file mode 100644 index 73f4d89f7354e..0000000000000 --- a/depends/patches/qt/fix_qt_pkgconfig.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- old/qtbase/mkspecs/features/qt_module.prf -+++ new/qtbase/mkspecs/features/qt_module.prf -@@ -269,7 +269,7 @@ load(qt_installs) - load(qt_targets) - - # this builds on top of qt_common --!internal_module:if(unix|mingw):!if(darwin:debug_and_release:CONFIG(debug, debug|release)) { -+if(unix|mingw):!if(darwin:debug_and_release:CONFIG(debug, debug|release)) { - CONFIG += create_pc - QMAKE_PKGCONFIG_DESTDIR = pkgconfig - host_build: \ diff --git a/depends/patches/qt/guix_cross_lib_path.patch b/depends/patches/qt/guix_cross_lib_path.patch deleted file mode 100644 index 7911dc21d7dba..0000000000000 --- a/depends/patches/qt/guix_cross_lib_path.patch +++ /dev/null @@ -1,17 +0,0 @@ -Facilitate guix building with CROSS_LIBRARY_PATH - -See discussion in https://github.com/bitcoin/bitcoin/pull/15277. 
- ---- a/qtbase/mkspecs/features/toolchain.prf -+++ b/qtbase/mkspecs/features/toolchain.prf -@@ -236,8 +236,8 @@ isEmpty($${target_prefix}.INCDIRS) { - add_libraries = false - for (line, output) { - line ~= s/^[ \\t]*// # remove leading spaces -- contains(line, "LIBRARY_PATH=.*") { -- line ~= s/^LIBRARY_PATH=// # remove leading LIBRARY_PATH= -+ contains(line, "(CROSS_)?LIBRARY_PATH=.*") { -+ line ~= s/^(CROSS_)?LIBRARY_PATH=// # remove leading (CROSS_)?LIBRARY_PATH= - equals(QMAKE_HOST.os, Windows): \ - paths = $$split(line, ;) - else: \ diff --git a/depends/patches/qt/mac-qmake.conf b/depends/patches/qt/mac-qmake.conf deleted file mode 100644 index cb94bf07b42d8..0000000000000 --- a/depends/patches/qt/mac-qmake.conf +++ /dev/null @@ -1,22 +0,0 @@ -MAKEFILE_GENERATOR = UNIX -CONFIG += app_bundle incremental lib_version_first absolute_library_soname -QMAKE_INCREMENTAL_STYLE = sublib -include(../common/macx.conf) -include(../common/gcc-base-mac.conf) -include(../common/clang.conf) -include(../common/clang-mac.conf) -QMAKE_MAC_SDK_PATH=$${MAC_SDK_PATH} -QMAKE_XCODE_VERSION = $${XCODE_VERSION} -QMAKE_XCODE_DEVELOPER_PATH=/Developer -QMAKE_MAC_SDK=macosx -QMAKE_MAC_SDK.macosx.Path = $${MAC_SDK_PATH} -QMAKE_MAC_SDK.macosx.platform_name = macosx -QMAKE_MAC_SDK.macosx.SDKVersion = $${MAC_SDK_VERSION} -QMAKE_MAC_SDK.macosx.PlatformPath = /phony -!host_build: QMAKE_CFLAGS += -target $${MAC_TARGET} -!host_build: QMAKE_OBJECTIVE_CFLAGS += $$QMAKE_CFLAGS -!host_build: QMAKE_CXXFLAGS += -target $${MAC_TARGET} -!host_build: QMAKE_LFLAGS += -target $${MAC_TARGET} -QMAKE_AR = $${CROSS_COMPILE}ar cq -QMAKE_RANLIB=$${CROSS_COMPILE}ranlib -load(qt_config) diff --git a/depends/patches/qt/no-xlib.patch b/depends/patches/qt/no-xlib.patch deleted file mode 100644 index d6846aaca2c29..0000000000000 --- a/depends/patches/qt/no-xlib.patch +++ /dev/null @@ -1,69 +0,0 @@ -From 9563cef873ae82e06f60708d706d054717e801ce Mon Sep 17 00:00:00 2001 -From: Carl Dong -Date: Thu, 18 Jul 2019 17:22:05 -0400 -Subject: [PATCH] Wrap xlib related code blocks in #if's - -They are not necessary to compile QT. 
---- - qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp | 8 ++++++++ - 1 file changed, 8 insertions(+) - -diff --git a/qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp b/qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp -index 7c62c2e2b3..c05c6c0a07 100644 ---- a/qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp -+++ b/qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp -@@ -49,7 +49,9 @@ - #include - #include - #include -+#if QT_CONFIG(xcb_xlib) && QT_CONFIG(library) - #include -+#endif - #include - #include - -@@ -391,6 +391,7 @@ void QXcbCursor::changeCursor(QCursor *cursor, QWindow *window) - xcb_flush(xcb_connection()); - } - -+#if QT_CONFIG(xcb_xlib) && QT_CONFIG(library) - static int cursorIdForShape(int cshape) - { - int cursorId = 0; -@@ -444,6 +445,7 @@ static int cursorIdForShape(int cshape) - } - return cursorId; - } -+#endif - - xcb_cursor_t QXcbCursor::createNonStandardCursor(int cshape) - { -@@ -556,7 +558,9 @@ static xcb_cursor_t loadCursor(void *dpy, int cshape) - xcb_cursor_t QXcbCursor::createFontCursor(int cshape) - { - xcb_connection_t *conn = xcb_connection(); -+#if QT_CONFIG(xcb_xlib) && QT_CONFIG(library) - int cursorId = cursorIdForShape(cshape); -+#endif - xcb_cursor_t cursor = XCB_NONE; - - // Try Xcursor first -@@ -586,6 +590,7 @@ xcb_cursor_t QXcbCursor::createFontCursor(int cshape) - // Non-standard X11 cursors are created from bitmaps - cursor = createNonStandardCursor(cshape); - -+#if QT_CONFIG(xcb_xlib) && QT_CONFIG(library) - // Create a glpyh cursor if everything else failed - if (!cursor && cursorId) { - cursor = xcb_generate_id(conn); -@@ -593,6 +598,7 @@ xcb_cursor_t QXcbCursor::createFontCursor(int cshape) - cursorId, cursorId + 1, - 0xFFFF, 0xFFFF, 0xFFFF, 0, 0, 0); - } -+#endif - - if (cursor && cshape >= 0 && cshape < Qt::LastCursor && connection()->hasXFixes()) { - const char *name = cursorNames[cshape].front(); --- -2.22.0 - diff --git a/depends/patches/qt/qt.pro b/depends/patches/qt/qt.pro deleted file mode 100644 index 8f2e900a840fb..0000000000000 --- a/depends/patches/qt/qt.pro +++ /dev/null @@ -1,16 +0,0 @@ -# Create the super cache so modules will add themselves to it. -cache(, super) - -!QTDIR_build: cache(CONFIG, add, $$list(QTDIR_build)) - -prl = no_install_prl -CONFIG += $$prl -cache(CONFIG, add stash, prl) - -TEMPLATE = subdirs -SUBDIRS = qtbase qttools qttranslations - -qttools.depends = qtbase -qttranslations.depends = qttools - -load(qt_configure) diff --git a/depends/patches/qt/qtbase-moc-ignore-gcc-macro.patch b/depends/patches/qt/qtbase-moc-ignore-gcc-macro.patch deleted file mode 100644 index f0c14a9400e12..0000000000000 --- a/depends/patches/qt/qtbase-moc-ignore-gcc-macro.patch +++ /dev/null @@ -1,17 +0,0 @@ -The moc executable loops through headers on CPLUS_INCLUDE_PATH and stumbles -on the GCC internal _GLIBCXX_VISIBILITY macro. Tell it to ignore it as it is -not supposed to be looking there to begin with. 
- -Upstream report: https://bugreports.qt.io/browse/QTBUG-83160 - -diff --git a/qtbase/src/tools/moc/main.cpp b/qtbase/src/tools/moc/main.cpp ---- a/qtbase/src/tools/moc/main.cpp -+++ b/qtbase/src/tools/moc/main.cpp -@@ -238,6 +238,7 @@ int runMoc(int argc, char **argv) - dummyVariadicFunctionMacro.arguments += Symbol(0, PP_IDENTIFIER, "__VA_ARGS__"); - pp.macros["__attribute__"] = dummyVariadicFunctionMacro; - pp.macros["__declspec"] = dummyVariadicFunctionMacro; -+ pp.macros["_GLIBCXX_VISIBILITY"] = dummyVariadicFunctionMacro; - - QString filename; - QString output; diff --git a/depends/patches/qt/qttools_src.pro b/depends/patches/qt/qttools_src.pro deleted file mode 100644 index 6ef71a0942735..0000000000000 --- a/depends/patches/qt/qttools_src.pro +++ /dev/null @@ -1,6 +0,0 @@ -TEMPLATE = subdirs -SUBDIRS = linguist - -fb = force_bootstrap -CONFIG += $$fb -cache(CONFIG, add, fb) diff --git a/depends/patches/qt/rcc_hardcode_timestamp.patch b/depends/patches/qt/rcc_hardcode_timestamp.patch deleted file mode 100644 index 03f3897975646..0000000000000 --- a/depends/patches/qt/rcc_hardcode_timestamp.patch +++ /dev/null @@ -1,24 +0,0 @@ -Hardcode last modified timestamp in Qt RCC - -This change allows the already built qt package to be reused even with -the SOURCE_DATE_EPOCH variable set, e.g., for Guix builds. - - ---- old/qtbase/src/tools/rcc/rcc.cpp -+++ new/qtbase/src/tools/rcc/rcc.cpp -@@ -227,14 +227,7 @@ void RCCFileInfo::writeDataInfo(RCCResourceLibrary &lib) - - if (lib.formatVersion() >= 2) { - // last modified time stamp -- const QDateTime lastModified = m_fileInfo.lastModified(); -- quint64 lastmod = quint64(lastModified.isValid() ? lastModified.toMSecsSinceEpoch() : 0); -- static const quint64 sourceDate = 1000 * qgetenv("QT_RCC_SOURCE_DATE_OVERRIDE").toULongLong(); -- if (sourceDate != 0) -- lastmod = sourceDate; -- static const quint64 sourceDate2 = 1000 * qgetenv("SOURCE_DATE_EPOCH").toULongLong(); -- if (sourceDate2 != 0) -- lastmod = sourceDate2; -+ quint64 lastmod = quint64(1); - lib.writeNumber8(lastmod); - if (text || pass1) - lib.writeChar('\n'); diff --git a/depends/patches/qt/use_android_ndk23.patch b/depends/patches/qt/use_android_ndk23.patch deleted file mode 100644 index f22367d527bec..0000000000000 --- a/depends/patches/qt/use_android_ndk23.patch +++ /dev/null @@ -1,13 +0,0 @@ -Use Android NDK r23 LTS - ---- old/qtbase/mkspecs/features/android/default_pre.prf -+++ new/qtbase/mkspecs/features/android/default_pre.prf -@@ -76,7 +76,7 @@ else: equals(QT_ARCH, x86_64): CROSS_COMPILE = $$NDK_LLVM_PATH/bin/x86_64-linux- - else: equals(QT_ARCH, arm64-v8a): CROSS_COMPILE = $$NDK_LLVM_PATH/bin/aarch64-linux-android- - else: CROSS_COMPILE = $$NDK_LLVM_PATH/bin/arm-linux-androideabi- - --QMAKE_RANLIB = $${CROSS_COMPILE}ranlib -+QMAKE_RANLIB = $$NDK_LLVM_PATH/bin/llvm-ranlib - QMAKE_LINK_SHLIB = $$QMAKE_LINK - QMAKE_LFLAGS = - diff --git a/depends/patches/systemtap/fix_variadic_warning.patch b/depends/patches/systemtap/fix_variadic_warning.patch deleted file mode 100644 index 93cc2d6081d77..0000000000000 --- a/depends/patches/systemtap/fix_variadic_warning.patch +++ /dev/null @@ -1,16 +0,0 @@ -Could be dropped after a migration to C++20. -See: https://github.com/bitcoin/bitcoin/issues/26916. 
- -diff --git a/includes/sys/sdt.h b/includes/sys/sdt.h -index 4075a5f..7c6138c 100644 ---- a/includes/sys/sdt.h -+++ b/includes/sys/sdt.h -@@ -276,7 +276,7 @@ __extension__ extern unsigned long long __sdt_unsp; - _SDT_ASM_1(.purgem _SDT_TYPE_) \ - _SDT_ASM_1(.purgem _SDT_TYPE) - --#define _SDT_ASM_BODY(provider, name, pack_args, args, ...) \ -+#define _SDT_ASM_BODY(provider, name, pack_args, args) \ - _SDT_DEF_MACROS \ - _SDT_ASM_1(990: _SDT_NOP) \ - _SDT_ASM_3( .pushsection .note.stapsdt,_SDT_ASM_AUTOGROUP,"note") \ diff --git a/depends/patches/zeromq/netbsd_kevent_void.patch b/depends/patches/zeromq/netbsd_kevent_void.patch deleted file mode 100644 index 845c6bdda6719..0000000000000 --- a/depends/patches/zeromq/netbsd_kevent_void.patch +++ /dev/null @@ -1,57 +0,0 @@ -commit 129137d5182967dbfcfec66bad843df2a992a78f -Author: fanquake -Date: Mon Jan 3 20:13:33 2022 +0800 - - problem: kevent udata is now void* on NetBSD Current (10) - - solution: check for the intptr_t variant in configure. - -diff --git a/configure.ac b/configure.ac -index 1a571291..402f8b86 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -307,6 +307,27 @@ case "${host_os}" in - if test "x$libzmq_netbsd_has_atomic" = "xno"; then - AC_DEFINE(ZMQ_FORCE_MUTEXES, 1, [Force to use mutexes]) - fi -+ # NetBSD Current (to become 10) has changed the type of udata in it's -+ # kevent struct from intptr_t to void * to align with darwin and other -+ # BSDs, see upstream commit: -+ # https://github.com/NetBSD/src/commit/e5ead823eb916b56589d2c6c560dbcfe4a2d0afc -+ AC_MSG_CHECKING([whether kevent udata type is intptr_t]) -+ AC_LANG_PUSH([C++]) -+ AC_LINK_IFELSE([AC_LANG_PROGRAM( -+ [[#include -+ #include -+ #include ]], -+ [[struct kevent ev; -+ intptr_t udata; -+ EV_SET(&ev, 0, 0, EV_ADD, 0, 0, udata); -+ return 0;]])], -+ [libzmq_netbsd_kevent_udata_intptr_t=yes], -+ [libzmq_netbsd_kevent_udata_intptr_t=no]) -+ AC_LANG_POP([C++]) -+ AC_MSG_RESULT([$libzmq_netbsd_kevent_udata_intptr_t]) -+ if test "x$libzmq_netbsd_kevent_udata_intptr_t" = "xyes"; then -+ AC_DEFINE(ZMQ_NETBSD_KEVENT_UDATA_INTPTR_T, 1, [kevent udata type is intptr_t]) -+ fi - ;; - *openbsd*|*bitrig*) - # Define on OpenBSD to enable all library features -diff --git a/src/kqueue.cpp b/src/kqueue.cpp -index 53d82ac4..a6a7a7f2 100644 ---- a/src/kqueue.cpp -+++ b/src/kqueue.cpp -@@ -46,9 +46,9 @@ - #include "i_poll_events.hpp" - #include "likely.hpp" - --// NetBSD defines (struct kevent).udata as intptr_t, everyone else --// as void *. --#if defined ZMQ_HAVE_NETBSD -+// NetBSD up to version 9 defines (struct kevent).udata as intptr_t, -+// everyone else as void *. 
-+#if defined ZMQ_HAVE_NETBSD && defined(ZMQ_NETBSD_KEVENT_UDATA_INTPTR_T) - #define kevent_udata_t intptr_t - #else - #define kevent_udata_t void * diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am index 3b4f961a9cf99..2499d49b576d7 100644 --- a/doc/man/Makefile.am +++ b/doc/man/Makefile.am @@ -1,23 +1,23 @@ dist_man1_MANS= -if BUILD_BITCOIND +if BUILD_NAVCOIND dist_man1_MANS+=navcoind.1 endif -if BUILD_BITCOIN_CLI +if BUILD_NAVCOIN_CLI dist_man1_MANS+=navcoin-cli.1 endif -if BUILD_BITCOIN_TX +if BUILD_NAVCOIN_TX dist_man1_MANS+=navcoin-tx.1 endif -if BUILD_BITCOIN_UTIL +if BUILD_NAVCOIN_UTIL dist_man1_MANS+=navcoin-util.1 endif if ENABLE_WALLET -if BUILD_BITCOIN_WALLET +if BUILD_NAVCOIN_WALLET dist_man1_MANS+=navcoin-wallet.1 endif endif diff --git a/libbitcoinconsensus.pc.in b/libnavcoinconsensus.pc.in similarity index 100% rename from libbitcoinconsensus.pc.in rename to libnavcoinconsensus.pc.in diff --git a/src/.bear-tidy-config b/src/.bear-tidy-config index b17a247a6c418..1e0870a93c422 100644 --- a/src/.bear-tidy-config +++ b/src/.bear-tidy-config @@ -4,8 +4,14 @@ "include_only_existing_source": true, "paths_to_include": [], "paths_to_exclude": [ + "src/crc32c", + "src/crypto/ctaes", "src/leveldb", - "src/bls" + "src/bls", + "src/minisketch", + "src/bench/nanobench.cpp", + "src/bench/nanobench.h", + "src/secp256k1" ] }, "format": { diff --git a/src/.clang-format b/src/.clang-format index 791b3b8f9f068..f20e5ee2d4ca1 100644 --- a/src/.clang-format +++ b/src/.clang-format @@ -43,5 +43,7 @@ SpacesInAngles: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false -Standard: c++17 +BreakBeforeConceptDeclarations: Always +RequiresExpressionIndentation: OuterScope +Standard: c++20 UseTab: Never diff --git a/src/.clang-tidy b/src/.clang-tidy index 39566c0cb7fcc..bfaa5ab8e7b09 100644 --- a/src/.clang-tidy +++ b/src/.clang-tidy @@ -1,11 +1,17 @@ Checks: ' -*, +bitcoin-*, bugprone-argument-comment, +bugprone-string-constructor, bugprone-use-after-move, +bugprone-lambda-function-name, misc-unused-using-decls, modernize-use-default-member-init, +modernize-use-emplace, +modernize-use-noexcept, modernize-use-nullptr, performance-*, +-performance-avoid-endl, -performance-inefficient-string-concatenation, -performance-no-int-to-ptr, -performance-noexcept-move-constructor, diff --git a/src/Makefile.am b/src/Makefile.am index 5c2a77d453890..3b59139608cbf 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -41,10 +41,10 @@ LIBBLS=bls/lib/libbls384_256.a if ENABLE_ZMQ LIBBITCOIN_ZMQ=libbitcoin_zmq.a endif -if BUILD_BITCOIN_LIBS -LIBBITCOINCONSENSUS=libbitcoinconsensus.la +if BUILD_NAVCOIN_LIBS +LIBNAVCOINCONSENSUS=libbitcoinconsensus.la endif -if BUILD_BITCOIN_KERNEL_LIB +if BUILD_NAVCOIN_KERNEL_LIB LIBBITCOINKERNEL=libbitcoinkernel.la endif if ENABLE_WALLET @@ -116,33 +116,33 @@ EXTRA_LIBRARIES += \ $(LIBBITCOIN_WALLET_TOOL) \ $(LIBBITCOIN_ZMQ) -if BUILD_BITCOIND +if BUILD_NAVCOIND bin_PROGRAMS += navcoind endif -if BUILD_BITCOIN_NODE +if BUILD_NAVCOIN_NODE bin_PROGRAMS += navcoin-node endif -if BUILD_BITCOIN_CLI +if BUILD_NAVCOIN_CLI bin_PROGRAMS += navcoin-cli endif -if BUILD_BITCOIN_TX +if BUILD_NAVCOIN_TX bin_PROGRAMS += navcoin-tx endif if ENABLE_WALLET -if BUILD_BITCOIN_WALLET +if BUILD_NAVCOIN_WALLET bin_PROGRAMS += navcoin-wallet endif endif -if BUILD_BITCOIN_UTIL +if BUILD_NAVCOIN_UTIL bin_PROGRAMS += navcoin-util endif -if BUILD_BITCOIN_CHAINSTATE +if BUILD_NAVCOIN_CHAINSTATE bin_PROGRAMS += navcoin-chainstate endif @@ -1111,12 +1111,12 @@ 
navcoin_chainstate_LDADD += $(LIBSECP256K1) $(LIBBLS) $(LIBMCL) # # bitcoinkernel library # -if BUILD_BITCOIN_KERNEL_LIB +if BUILD_NAVCOIN_KERNEL_LIB lib_LTLIBRARIES += $(LIBBITCOINKERNEL) libbitcoinkernel_la_LDFLAGS = $(AM_LDFLAGS) -no-undefined $(RELDFLAGS) $(PTHREAD_FLAGS) libbitcoinkernel_la_LIBADD = $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBMEMENV) $(LIBSECP256K1) $(LIBBLS) $(LIBMCL) -libbitcoinkernel_la_CPPFLAGS = $(AM_CPPFLAGS) -I$(builddir)/obj -I$(srcdir)/secp256k1/include $(BLS_INCLUDES) -DBUILD_BITCOIN_INTERNAL $(BOOST_CPPFLAGS) $(LEVELDB_CPPFLAGS) -I$(srcdir)/$(UNIVALUE_INCLUDE_DIR_INT) +libbitcoinkernel_la_CPPFLAGS = $(AM_CPPFLAGS) -I$(builddir)/obj -I$(srcdir)/secp256k1/include $(BLS_INCLUDES) -DBUILD_NAVCOIN_INTERNAL $(BOOST_CPPFLAGS) $(LEVELDB_CPPFLAGS) -I$(srcdir)/$(UNIVALUE_INCLUDE_DIR_INT) # libbitcoinkernel requires default symbol visibility, explicitly specify that # here so that things still work even when user configures with @@ -1256,12 +1256,12 @@ libbitcoinkernel_la_SOURCES = \ # Required for obj/build.h to be generated first. # More details: https://www.gnu.org/software/automake/manual/html_node/Built-Sources-Example.html libbitcoinkernel_la-clientversion.l$(OBJEXT): obj/build.h -endif # BUILD_BITCOIN_KERNEL_LIB +endif # BUILD_NAVCOIN_KERNEL_LIB # # bitcoinconsensus library # -if BUILD_BITCOIN_LIBS -lib_LTLIBRARIES += $(LIBBITCOINCONSENSUS) +if BUILD_NAVCOIN_LIBS +lib_LTLIBRARIES += $(LIBNAVCOINCONSENSUS) include_HEADERS = script/bitcoinconsensus.h libbitcoinconsensus_la_SOURCES = support/cleanse.cpp $(crypto_libbitcoin_crypto_base_la_SOURCES) $(libbitcoin_consensus_a_SOURCES) @@ -1271,7 +1271,7 @@ libbitcoinconsensus_la_LIBADD = \ $(LIBBLS) \ $(LIBMCL) \ $(LIBSECP256K1) -libbitcoinconsensus_la_CPPFLAGS = $(AM_CPPFLAGS) -I$(builddir)/obj -I$(srcdir)/secp256k1/include $(BLS_INCLUDES) -DBUILD_BITCOIN_INTERNAL +libbitcoinconsensus_la_CPPFLAGS = $(AM_CPPFLAGS) -I$(builddir)/obj -I$(srcdir)/secp256k1/include $(BLS_INCLUDES) -DBUILD_NAVCOIN_INTERNAL libbitcoinconsensus_la_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) endif @@ -1351,12 +1351,12 @@ CLEANFILES += $(LIBBLSCT_A) blsct/include/libblsct_a-blsct.o endif # -config/bitcoin-config.h: config/stamp-h1 +config/navcoin-config.h: config/stamp-h1 @$(MAKE) -C $(top_builddir) $(subdir)/$(@) -config/stamp-h1: $(top_srcdir)/$(subdir)/config/bitcoin-config.h.in $(top_builddir)/config.status +config/stamp-h1: $(top_srcdir)/$(subdir)/config/navcoin-config.h.in $(top_builddir)/config.status $(AM_V_at)$(MAKE) -C $(top_builddir) $(subdir)/$(@) -$(top_srcdir)/$(subdir)/config/bitcoin-config.h.in: $(am__configure_deps) - $(AM_V_at)$(MAKE) -C $(top_srcdir) $(subdir)/config/bitcoin-config.h.in +$(top_srcdir)/$(subdir)/config/navcoin-config.h.in: $(am__configure_deps) + $(AM_V_at)$(MAKE) -C $(top_srcdir) $(subdir)/config/navcoin-config.h.in clean-local: -$(MAKE) -C secp256k1 clean diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 8070635cec192..9053ce0e57a9d 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -407,7 +407,7 @@ bitcoin_test_clean : FORCE rm -f $(CLEAN_BITCOIN_TEST) $(test_test_navcoin_OBJECTS) $(TEST_BINARY) check-local: $(BITCOIN_TESTS:.cpp=.cpp.test) -if BUILD_BITCOIN_TX +if BUILD_NAVCOIN_TX @echo "Running test/util/test_runner.py..." 
$(PYTHON) $(top_builddir)/test/util/test_runner.py endif diff --git a/src/bitcoin-chainstate.cpp b/src/bitcoin-chainstate.cpp index 52e697a78d90c..ed8f9244f10ba 100644 --- a/src/bitcoin-chainstate.cpp +++ b/src/bitcoin-chainstate.cpp @@ -47,8 +47,8 @@ int main(int argc, char* argv[]) << " BREAK IN FUTURE VERSIONS. DO NOT USE ON YOUR ACTUAL DATADIR." << std::endl; return 1; } - std::filesystem::path abs_datadir = std::filesystem::absolute(argv[1]); - std::filesystem::create_directories(abs_datadir); + fs::path abs_datadir{fs::absolute(argv[1])}; + fs::create_directories(abs_datadir); gArgs.ForceSetArg("-datadir", abs_datadir.string()); diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index ddb1701814929..bb6317bc35db9 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -4,7 +4,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp index cdcdbc20867e3..7f54f1c756240 100644 --- a/src/bitcoin-tx.cpp +++ b/src/bitcoin-tx.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/bitcoin-util.cpp b/src/bitcoin-util.cpp index d84a5b940e9e6..838a58a81f674 100644 --- a/src/bitcoin-util.cpp +++ b/src/bitcoin-util.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/bitcoin-wallet.cpp b/src/bitcoin-wallet.cpp index 16daaf30c4096..b97ebb49bcd72 100644 --- a/src/bitcoin-wallet.cpp +++ b/src/bitcoin-wallet.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp index 0feac330266d0..eda955a35f6ec 100644 --- a/src/bitcoind.cpp +++ b/src/bitcoind.cpp @@ -4,7 +4,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/blsct/arith/elements.cpp b/src/blsct/arith/elements.cpp index e7275ac994231..f533b0769b0fe 100644 --- a/src/blsct/arith/elements.cpp +++ b/src/blsct/arith/elements.cpp @@ -114,7 +114,7 @@ template size_t Elements::Size() const; template void Elements::Add(const T& x) { - m_vec.push_back(x); + m_vec.emplace_back(x); } template void Elements::Add(const MclScalar&); template void Elements::Add(const MclG1Point&); @@ -144,7 +144,7 @@ Elements Elements::FirstNPow(const T& k, const size_t& n, const size_t& fr T x(1); for (size_t i = 0; i < n + from_index; ++i) { if (i >= from_index) { - ret.m_vec.push_back(x); + ret.m_vec.emplace_back(x); } x = x * k; } @@ -157,7 +157,7 @@ Elements Elements::RepeatN(const T& k, const size_t& n) { Elements ret; for (size_t i = 0; i < n; ++i) { - ret.m_vec.push_back(k); + ret.m_vec.emplace_back(k); } return ret; } @@ -170,7 +170,7 @@ Elements Elements::RandVec(const size_t& n, const bool exclude_zero) Elements ret; for (size_t i = 0; i < n; ++i) { auto x = T::Rand(exclude_zero); - ret.m_vec.push_back(x); + ret.m_vec.emplace_back(x); } return ret; } @@ -184,7 +184,7 @@ Elements Elements::operator*(const Elements& rhs) const Elements ret; for (size_t i = 0; i < m_vec.size(); ++i) { - ret.m_vec.push_back(m_vec[i] * rhs[i]); + ret.m_vec.emplace_back(m_vec[i] * rhs[i]); } return ret; } @@ -197,7 +197,7 @@ Elements Elements::operator*(const Scalar& rhs) const { Elements ret; for (size_t i = 0; i < m_vec.size(); ++i) { - ret.m_vec.push_back(m_vec[i] * rhs); + ret.m_vec.emplace_back(m_vec[i] * rhs); } return ret; } @@ -211,7 +211,7 @@ Elements Elements::operator+(const Elements& rhs) const Elements ret; for (size_t i = 0; i < m_vec.size(); ++i) { - ret.m_vec.push_back(m_vec[i] + rhs.m_vec[i]); + ret.m_vec.emplace_back(m_vec[i] + rhs.m_vec[i]); } return ret; } @@ -225,7 +225,7 @@ Elements Elements::operator-(const Elements& rhs) const Elements ret; for (size_t i = 0; i < m_vec.size(); ++i) { - ret.m_vec.push_back(m_vec[i] - rhs.m_vec[i]); + ret.m_vec.emplace_back(m_vec[i] - rhs.m_vec[i]); } return ret; } @@ -238,7 +238,7 @@ void Elements::operator=(const Elements& rhs) m_vec.clear(); for (size_t i = 0; i < rhs.m_vec.size(); ++i) { auto copy = T(rhs.m_vec[i]); - m_vec.push_back(copy); + m_vec.emplace_back(copy); } } template void Elements::operator=(const Elements&); @@ -276,7 +276,7 @@ Elements Elements::From(const size_t from_index) const Elements ret; for (size_t i = from_index; i < m_vec.size(); ++i) { - ret.m_vec.push_back(m_vec[i]); + ret.m_vec.emplace_back(m_vec[i]); } return ret; } @@ -292,7 +292,7 @@ Elements Elements::To(const size_t to_index) const Elements ret; for (size_t i = 0; i < to_index; ++i) { - ret.m_vec.push_back(m_vec[i]); + ret.m_vec.emplace_back(m_vec[i]); } return ret; } diff --git a/src/blsct/arith/mcl/mcl_g1point.cpp b/src/blsct/arith/mcl/mcl_g1point.cpp index fde515e08775a..00433cad0de6b 100644 --- a/src/blsct/arith/mcl/mcl_g1point.cpp +++ b/src/blsct/arith/mcl/mcl_g1point.cpp @@ -78,7 +78,7 @@ std::vector MclG1Point::operator*(const std::vector MclScalar::ToBinaryVec() const auto bitStr = GetString(2); std::vector vec; for (auto& c : bitStr) { - vec.push_back(c == '0' ? 0 : 1); + vec.emplace_back(c == '0' ? 
0 : 1); } return vec; } diff --git a/src/blsct/arith/mcl/mcl_util.h b/src/blsct/arith/mcl/mcl_util.h index ec078fabe3dba..6c9224dfed5a4 100644 --- a/src/blsct/arith/mcl/mcl_util.h +++ b/src/blsct/arith/mcl/mcl_util.h @@ -20,8 +20,8 @@ struct MclUtil { std::vector exps; for (auto point: points) { - bases.push_back(point.m_base.GetUnderlying()); - exps.push_back(point.m_exp.GetUnderlying()); + bases.emplace_back(point.m_base.GetUnderlying()); + exps.emplace_back(point.m_exp.GetUnderlying()); } MclG1Point::Underlying pv; mclBnG1_mulVec(&pv, bases.data(), exps.data(), points.size()); diff --git a/src/blsct/bech32_mod.cpp b/src/blsct/bech32_mod.cpp index 2d0a5d0680c5d..6dceb1a56f6c4 100644 --- a/src/blsct/bech32_mod.cpp +++ b/src/blsct/bech32_mod.cpp @@ -128,18 +128,18 @@ bool CheckCharacters(const std::string& str, std::vector& errors) unsigned char c{(unsigned char)(str[i])}; if (c >= 'a' && c <= 'z') { if (upper) { - errors.push_back(i); + errors.emplace_back(i); } else { lower = true; } } else if (c >= 'A' && c <= 'Z') { if (lower) { - errors.push_back(i); + errors.emplace_back(i); } else { upper = true; } } else if (c < 33 || c > 126) { - errors.push_back(i); + errors.emplace_back(i); } } return errors.empty(); diff --git a/src/blsct/building_block/lazy_points.cpp b/src/blsct/building_block/lazy_points.cpp index a77f15c665fcc..ac640063b0d60 100644 --- a/src/blsct/building_block/lazy_points.cpp +++ b/src/blsct/building_block/lazy_points.cpp @@ -12,14 +12,14 @@ LazyPoints::LazyPoints(const Elements& bases, const Elemen throw std::runtime_error("sizes of bases and exps don't match"); } for (size_t i=0; i(bases[i], exps[i])); + m_points.emplace_back(LazyPoint(bases[i], exps[i])); } } template LazyPoints::LazyPoints(const Elements& bases, const Elements& exps); template void LazyPoints::Add(const LazyPoint& point) { - m_points.push_back(point); + m_points.emplace_back(point); } template void LazyPoints::Add(const LazyPoint& point); diff --git a/src/blsct/common.cpp b/src/blsct/common.cpp index 99ca312140acb..43479e783019d 100644 --- a/src/blsct/common.cpp +++ b/src/blsct/common.cpp @@ -26,7 +26,7 @@ std::vector blsct::Common::TrimPreceedingZeros( bool should_take = false; for (auto x : vec) { if (!should_take && x) should_take = true; - if (should_take) trimmed_vec.push_back(x); + if (should_take) trimmed_vec.emplace_back(x); } return trimmed_vec; } @@ -39,7 +39,7 @@ template void blsct::Common::AddZeroIfEmpty(std::vector& vec) { if (vec.size() == 0) { - vec.push_back(0); + vec.emplace_back(0); } } template diff --git a/src/blsct/public_keys.cpp b/src/blsct/public_keys.cpp index 2b3595b93515a..885206f2f9063 100644 --- a/src/blsct/public_keys.cpp +++ b/src/blsct/public_keys.cpp @@ -79,9 +79,9 @@ bool PublicKeys::VerifyBatch(const std::vector& msgs, const auto msg = msgs.begin(); for (auto pk = m_pks.begin(), end = m_pks.end(); pk != end; ++pk, ++msg) { if (*msg == blsct::Common::BLSCTBALANCE && fVerifyTx) { - aug_msgs.push_back(*msg); + aug_msgs.emplace_back(*msg); } else { - aug_msgs.push_back(pk->AugmentMessage(*msg)); + aug_msgs.emplace_back(pk->AugmentMessage(*msg)); } } return CoreAggregateVerify(aug_msgs, sig); diff --git a/src/blsct/range_proof/bulletproofs/range_proof_logic.cpp b/src/blsct/range_proof/bulletproofs/range_proof_logic.cpp index d672e76db697f..24a6c6a47edcd 100644 --- a/src/blsct/range_proof/bulletproofs/range_proof_logic.cpp +++ b/src/blsct/range_proof/bulletproofs/range_proof_logic.cpp @@ -348,7 +348,7 @@ bool RangeProofLogic::Verify( // derive transcript from the proof 
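The `push_back` → `emplace_back` conversions in the blsct hunks above (and in several hunks below) lean on `emplace_back` forwarding its arguments to the element constructor rather than taking a ready-made element. A small self-contained illustration, not taken from the diff:

```cpp
#include <string>
#include <utility>
#include <vector>

int main()
{
    std::vector<std::pair<int, std::string>> v;

    // push_back needs a pair object first, so a temporary is constructed
    // and then moved into the vector.
    v.push_back(std::make_pair(1, std::string("one")));

    // emplace_back forwards the arguments straight to the pair constructor,
    // building the element in place inside the vector's storage.
    v.emplace_back(2, "two");

    // When the argument is already an lvalue of the element type, as in most
    // of the conversions above, both calls end up performing the same copy.
    const std::pair<int, std::string> p{3, "three"};
    v.push_back(p);
    v.emplace_back(p);

    return v.size() == 4 ? 0 : 1;
}
```

For the lvalue arguments that dominate these hunks the two calls do the same copy; the clearer win shows up in calls like `m_command.emplace_back(argv[i])` further down, where the `std::string` element is built directly from the `const char*` with no intermediate temporary.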
auto proof_transcript = RangeProofWithTranscript::Build(proof); - proof_transcripts.push_back(proof_transcript); + proof_transcripts.emplace_back(proof_transcript); } const size_t max_mn = 1ull << max_num_rounds; @@ -427,7 +427,7 @@ AmountRecoveryResult RangeProofLogic::RecoverAmounts( req.nonce.GetHashWithSalt(100), // gamma for vs[0] msg_amt.msg); - xs.push_back(x); + xs.emplace_back(x); } return { true, diff --git a/src/blsct/range_proof/bulletproofs_plus/range_proof_logic.cpp b/src/blsct/range_proof/bulletproofs_plus/range_proof_logic.cpp index 2743c6d8e093d..dc96b0f863103 100644 --- a/src/blsct/range_proof/bulletproofs_plus/range_proof_logic.cpp +++ b/src/blsct/range_proof/bulletproofs_plus/range_proof_logic.cpp @@ -507,7 +507,7 @@ bool RangeProofLogic::Verify( // derive transcript from the proof auto proof_transcript = RangeProofWithTranscript::Build(proof); - proof_transcripts.push_back(proof_transcript); + proof_transcripts.emplace_back(proof_transcript); } const size_t max_mn = 1ull << max_num_rounds; @@ -601,7 +601,7 @@ AmountRecoveryResult RangeProofLogic::RecoverAmounts( req.nonce.GetHashWithSalt(100), // gamma for vs[0] msg_amt.msg ); - xs.push_back(x); + xs.emplace_back(x); Scalar msg2_scalar = ((req.tau_x - (tau2 * req.y.Square()) - (req.z.Square() * gamma_vs0)) * req.y.Invert()) - tau1; std::vector msg2 = msg2_scalar.GetVch(true); diff --git a/src/blsct/set_mem_proof/verifying_equations.md b/src/blsct/set_mem_proof/verifying_equations.md index 4e1062d74a3e5..a4be0e75df53a 100644 --- a/src/blsct/set_mem_proof/verifying_equations.md +++ b/src/blsct/set_mem_proof/verifying_equations.md @@ -108,7 +108,7 @@ for (size_t i=0; i KeyMan::RecoverOutputs(const std::vector for (size_t i = 0; i < outs.size(); i++) { CTxOut out = outs[i]; auto nonce = out.blsctData.blindingKey * viewKey.GetScalar(); - reqs.push_back(bulletproofs::AmountRecoveryRequest::of({out.blsctData.rangeProof}, nonce)); + reqs.emplace_back(bulletproofs::AmountRecoveryRequest::of({out.blsctData.rangeProof}, nonce)); } return rp.RecoverAmounts(reqs); diff --git a/src/blsct/wallet/txfactory.cpp b/src/blsct/wallet/txfactory.cpp index 2d9cb7baa3611..e325c9d8edb18 100644 --- a/src/blsct/wallet/txfactory.cpp +++ b/src/blsct/wallet/txfactory.cpp @@ -26,7 +26,7 @@ void TxFactory::AddOutput(const SubAddress& destination, const CAmount& nAmount, if (vOutputs.count(tokenId) <= 0) vOutputs[tokenId] = std::vector(); - vOutputs[tokenId].push_back(out); + vOutputs[tokenId].emplace_back(out); } bool TxFactory::AddInput(const CCoinsViewCache& cache, const COutPoint& outpoint, const bool& rbf) @@ -99,31 +99,31 @@ std::optional TxFactory::BuildTx() for (auto& in_ : vInputs) { for (auto& in : in_.second) { - tx.vin.push_back(in.in); + tx.vin.emplace_back(in.in); gammaAcc = gammaAcc + in.gamma; - txSigs.push_back(in.sk.Sign(in.in.GetHash())); + txSigs.emplace_back(in.sk.Sign(in.in.GetHash())); } } for (auto& out_ : vOutputs) { for (auto& out : out_.second) { - tx.vout.push_back(out.out); + tx.vout.emplace_back(out.out); gammaAcc = gammaAcc - out.gamma; - txSigs.push_back(PrivateKey(out.blindingKey).Sign(out.out.GetHash())); + txSigs.emplace_back(PrivateKey(out.blindingKey).Sign(out.out.GetHash())); } } for (auto& change : mapChange) { auto changeOutput = CreateOutput(std::get(km->GetNewDestination(-1).value()), change.second, "Change", change.first); - tx.vout.push_back(changeOutput.out); + tx.vout.emplace_back(changeOutput.out); gammaAcc = gammaAcc - changeOutput.gamma; - 
txSigs.push_back(PrivateKey(changeOutput.blindingKey).Sign(changeOutput.out.GetHash())); + txSigs.emplace_back(PrivateKey(changeOutput.blindingKey).Sign(changeOutput.out.GetHash())); } if (nFee == (long long)(BLSCT_DEFAULT_FEE * (tx.vin.size() + tx.vout.size()))) { CTxOut fee_out{nFee, CScript(OP_RETURN)}; - tx.vout.push_back(fee_out); - txSigs.push_back(PrivateKey(gammaAcc).SignBalance()); + tx.vout.emplace_back(fee_out); + txSigs.emplace_back(PrivateKey(gammaAcc).SignBalance()); tx.txSig = Signature::Aggregate(txSigs); return tx; } @@ -156,4 +156,4 @@ std::optional TxFactory::CreateTransaction(wallet::CWallet* return tx.BuildTx(); } -} // namespace blsct \ No newline at end of file +} // namespace blsct diff --git a/src/blsct/wallet/txfactory_global.cpp b/src/blsct/wallet/txfactory_global.cpp index 2ea1e0d0b34d6..86a1230339279 100644 --- a/src/blsct/wallet/txfactory_global.cpp +++ b/src/blsct/wallet/txfactory_global.cpp @@ -36,8 +36,8 @@ Signature UnsignedOutput::GetSignature() const { std::vector txSigs; - txSigs.push_back(blsct::PrivateKey(blindingKey).Sign(out.GetHash())); - txSigs.push_back(blsct::PrivateKey(gamma.Negate()).SignBalance()); + txSigs.emplace_back(blsct::PrivateKey(blindingKey).Sign(out.GetHash())); + txSigs.emplace_back(blsct::PrivateKey(gamma.Negate()).SignBalance()); return Signature::Aggregate(txSigs); } @@ -94,20 +94,20 @@ CTransactionRef AggregateTransactions(const std::vector& txs) CAmount nFee = 0; for (auto& tx : txs) { - vSigs.push_back(tx->txSig); + vSigs.emplace_back(tx->txSig); for (auto& in : tx->vin) { - ret.vin.push_back(in); + ret.vin.emplace_back(in); } for (auto& out : tx->vout) { if (out.scriptPubKey.IsFee()) { nFee += out.nValue; continue; } - ret.vout.push_back(out); + ret.vout.emplace_back(out); } } - ret.vout.push_back(CTxOut{nFee, CScript{OP_RETURN}}); + ret.vout.emplace_back(CTxOut{nFee, CScript{OP_RETURN}}); ret.txSig = blsct::Signature::Aggregate(vSigs); ret.nVersion = CTransaction::BLSCT_MARKER; diff --git a/src/blsct/wallet/verification.cpp b/src/blsct/wallet/verification.cpp index 67c973673f5e7..c741cf5d0459e 100644 --- a/src/blsct/wallet/verification.cpp +++ b/src/blsct/wallet/verification.cpp @@ -32,9 +32,9 @@ bool VerifyTx(const CTransaction& tx, const CCoinsViewCache& view, const CAmount return false; } - vPubKeys.push_back(coin.out.blsctData.spendingKey); + vPubKeys.emplace_back(coin.out.blsctData.spendingKey); auto in_hash = in.GetHash(); - vMessages.push_back(Message(in_hash.begin(), in_hash.end())); + vMessages.emplace_back(Message(in_hash.begin(), in_hash.end())); balanceKey = balanceKey + coin.out.blsctData.rangeProof.Vs[0]; } } @@ -43,10 +43,10 @@ bool VerifyTx(const CTransaction& tx, const CCoinsViewCache& view, const CAmount for (auto& out : tx.vout) { if (out.IsBLSCT()) { - vPubKeys.push_back(out.blsctData.ephemeralKey); + vPubKeys.emplace_back(out.blsctData.ephemeralKey); auto out_hash = out.GetHash(); - vMessages.push_back(Message(out_hash.begin(), out_hash.end())); - vProofs.push_back(out.blsctData.rangeProof); + vMessages.emplace_back(Message(out_hash.begin(), out_hash.end())); + vProofs.emplace_back(out.blsctData.rangeProof); balanceKey = balanceKey - out.blsctData.rangeProof.Vs[0]; } else { if (!out.scriptPubKey.IsUnspendable() && out.nValue > 0) { @@ -62,8 +62,8 @@ bool VerifyTx(const CTransaction& tx, const CCoinsViewCache& view, const CAmount } } - vMessages.push_back(blsct::Common::BLSCTBALANCE); - vPubKeys.push_back(balanceKey); + vMessages.emplace_back(blsct::Common::BLSCTBALANCE); + 
vPubKeys.emplace_back(balanceKey); return PublicKeys{vPubKeys}.VerifyBatch(vMessages, tx.txSig, true) && rp.Verify(vProofs); diff --git a/src/clientversion.h b/src/clientversion.h index 9da0cd0b39428..8a39f9ac13fa1 100644 --- a/src/clientversion.h +++ b/src/clientversion.h @@ -8,12 +8,12 @@ #include #if defined(HAVE_CONFIG_H) -#include +#include #endif //HAVE_CONFIG_H // Check that required client information is defined #if !defined(CLIENT_VERSION_MAJOR) || !defined(CLIENT_VERSION_MINOR) || !defined(CLIENT_VERSION_BUILD) || !defined(CLIENT_VERSION_IS_RELEASE) || !defined(COPYRIGHT_YEAR) -#error Client version information missing: version is not defined by bitcoin-config.h or in any other way +#error Client version information missing: version is not defined by navcoin-config.h or in any other way #endif //! Copyright string used in Windows .rc files diff --git a/src/common/args.cpp b/src/common/args.cpp index b3cdc5dec12e2..4e251857ecef6 100644 --- a/src/common/args.cpp +++ b/src/common/args.cpp @@ -217,7 +217,7 @@ bool ArgsManager::ParseParameters(int argc, const char* const argv[], std::strin m_command.push_back(key); while (++i < argc) { // The remaining args are command args - m_command.push_back(argv[i]); + m_command.emplace_back(argv[i]); } break; } diff --git a/src/common/run_command.cpp b/src/common/run_command.cpp index 6ad9f75b5d99f..ab0205cbd4b7d 100644 --- a/src/common/run_command.cpp +++ b/src/common/run_command.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/compat/byteswap.h b/src/compat/byteswap.h index 9ee71ef267d3a..c7f159839cee3 100644 --- a/src/compat/byteswap.h +++ b/src/compat/byteswap.h @@ -6,7 +6,7 @@ #define BITCOIN_COMPAT_BYTESWAP_H #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/compat/compat.h b/src/compat/compat.h index 8195bceaecb86..22e7091c8ec10 100644 --- a/src/compat/compat.h +++ b/src/compat/compat.h @@ -7,7 +7,7 @@ #define BITCOIN_COMPAT_COMPAT_H #if defined(HAVE_CONFIG_H) -#include +#include #endif // Windows defines FD_SETSIZE to 64 (see _fd_types.h in mingw-w64), diff --git a/src/compat/endian.h b/src/compat/endian.h index 882de2dbf0e49..7fd4f315713fc 100644 --- a/src/compat/endian.h +++ b/src/compat/endian.h @@ -6,7 +6,7 @@ #define BITCOIN_COMPAT_ENDIAN_H #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/crypto/chacha_poly_aead.cpp b/src/crypto/chacha_poly_aead.cpp index 119ad6902f9e0..48393f8bb7b5f 100644 --- a/src/crypto/chacha_poly_aead.cpp +++ b/src/crypto/chacha_poly_aead.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/crypto/common.h b/src/crypto/common.h index dc12ed9942ffc..0cf2abb2dda2e 100644 --- a/src/crypto/common.h +++ b/src/crypto/common.h @@ -6,7 +6,7 @@ #define BITCOIN_CRYPTO_COMMON_H #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/crypto/muhash.h b/src/crypto/muhash.h index b56efa97b9ffd..e79f522283097 100644 --- a/src/crypto/muhash.h +++ b/src/crypto/muhash.h @@ -6,7 +6,7 @@ #define BITCOIN_CRYPTO_MUHASH_H #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/crypto/sha256.cpp b/src/crypto/sha256.cpp index a4eef36480dcd..65a6d033cd4ad 100644 --- a/src/crypto/sha256.cpp +++ b/src/crypto/sha256.cpp @@ -10,12 +10,12 @@ #include -#if defined(__linux__) && defined(ENABLE_ARM_SHANI) && !defined(BUILD_BITCOIN_INTERNAL) +#if defined(__linux__) && defined(ENABLE_ARM_SHANI) && !defined(BUILD_NAVCOIN_INTERNAL) #include #include #endif -#if defined(MAC_OSX) && defined(ENABLE_ARM_SHANI) && !defined(BUILD_BITCOIN_INTERNAL) +#if defined(MAC_OSX) && defined(ENABLE_ARM_SHANI) && !defined(BUILD_NAVCOIN_INTERNAL) #include #include #endif @@ -604,7 +604,7 @@ std::string SHA256AutoDetect() have_x86_shani = (ebx >> 29) & 1; } -#if defined(ENABLE_X86_SHANI) && !defined(BUILD_BITCOIN_INTERNAL) +#if defined(ENABLE_X86_SHANI) && !defined(BUILD_NAVCOIN_INTERNAL) if (have_x86_shani) { Transform = sha256_x86_shani::Transform; TransformD64 = TransformD64Wrapper; @@ -621,13 +621,13 @@ std::string SHA256AutoDetect() TransformD64 = TransformD64Wrapper; ret = "sse4(1way)"; #endif -#if defined(ENABLE_SSE41) && !defined(BUILD_BITCOIN_INTERNAL) +#if defined(ENABLE_SSE41) && !defined(BUILD_NAVCOIN_INTERNAL) TransformD64_4way = sha256d64_sse41::Transform_4way; ret += ",sse41(4way)"; #endif } -#if defined(ENABLE_AVX2) && !defined(BUILD_BITCOIN_INTERNAL) +#if defined(ENABLE_AVX2) && !defined(BUILD_NAVCOIN_INTERNAL) if (have_avx2 && have_avx && enabled_avx) { TransformD64_8way = sha256d64_avx2::Transform_8way; ret += ",avx2(8way)"; @@ -635,7 +635,7 @@ std::string SHA256AutoDetect() #endif #endif // defined(USE_ASM) && defined(HAVE_GETCPUID) -#if defined(ENABLE_ARM_SHANI) && !defined(BUILD_BITCOIN_INTERNAL) +#if defined(ENABLE_ARM_SHANI) && !defined(BUILD_NAVCOIN_INTERNAL) bool have_arm_shani = false; #if defined(__linux__) diff --git a/src/httpserver.cpp b/src/httpserver.cpp index 4f8a2b4d8dd69..1adb310e11035 100644 --- a/src/httpserver.cpp +++ b/src/httpserver.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/init.cpp b/src/init.cpp index 4c8482e1271dd..c47c4b3662400 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -4,7 +4,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/init/common.cpp b/src/init/common.cpp index 9a52a09cea086..d962e52906ff5 100644 --- a/src/init/common.cpp +++ b/src/init/common.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/mapport.cpp b/src/mapport.cpp index 994fd12cf5068..a6532876de0ad 100644 --- a/src/mapport.cpp +++ b/src/mapport.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
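The sha256.cpp hunks above only rename `BUILD_BITCOIN_INTERNAL` to `BUILD_NAVCOIN_INTERNAL`, but the pattern they sit in — compile-time gates deciding which specialised transforms exist, plus a runtime CPU check deciding which one gets called — is easy to miss in diff form. A reduced sketch with simplified signatures (the real transform pointers take the hash state and a block count):

```cpp
#include <string>

namespace sha256_generic { void Transform(unsigned char*, const unsigned char*, size_t) {} }
#if defined(ENABLE_X86_SHANI) && !defined(BUILD_NAVCOIN_INTERNAL)
namespace sha256_x86_shani { void Transform(unsigned char*, const unsigned char*, size_t) {} }
#endif

// Function pointer the rest of the code calls; it is rebound once at startup
// depending on what was compiled in and what the CPU reports.
void (*Transform)(unsigned char*, const unsigned char*, size_t) = sha256_generic::Transform;

std::string AutoDetectSketch([[maybe_unused]] bool have_x86_shani)
{
    std::string ret = "standard";
#if defined(ENABLE_X86_SHANI) && !defined(BUILD_NAVCOIN_INTERNAL)
    if (have_x86_shani) {
        Transform = sha256_x86_shani::Transform; // SHA-NI code path compiled in and usable
        ret = "x86_shani(1way)";
    }
#endif
    return ret;
}

int main() { return AutoDetectSketch(false) == "standard" ? 0 : 1; }
```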
#if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/net.cpp b/src/net.cpp index 3a039ff1fb86d..113f73dd5de90 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -4,7 +4,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/netaddress.h b/src/netaddress.h index 3d15b0b12345f..2653e584ce162 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -6,7 +6,7 @@ #define BITCOIN_NETADDRESS_H #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/netbase.h b/src/netbase.h index a43f22f2407d3..d06e681cb5b6c 100644 --- a/src/netbase.h +++ b/src/netbase.h @@ -6,7 +6,7 @@ #define BITCOIN_NETBASE_H #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index 188bbc5f452b6..3df5b142b1bd1 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -50,7 +50,7 @@ #include #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/psbt.h b/src/psbt.h index 9464b10268a5f..4d881cc345c51 100644 --- a/src/psbt.h +++ b/src/psbt.h @@ -874,7 +874,7 @@ struct PSBTOutput if ((leaf_ver & ~TAPROOT_LEAF_MASK) != 0) { throw std::ios_base::failure("Output Taproot tree has a leaf with an invalid leaf version"); } - m_tap_tree.push_back(std::make_tuple(depth, leaf_ver, script)); + m_tap_tree.emplace_back(std::make_tuple(depth, leaf_ver, script)); builder.Add((int)depth, script, (int)leaf_ver, /*track=*/true); } if (!builder.IsComplete()) { diff --git a/src/randomenv.cpp b/src/randomenv.cpp index 581612bccf38c..6907fb4f4b6fa 100644 --- a/src/randomenv.cpp +++ b/src/randomenv.cpp @@ -4,7 +4,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/script/bitcoinconsensus.cpp b/src/script/bitcoinconsensus.cpp index 4fab481b39894..9e7834c1c5d65 100644 --- a/src/script/bitcoinconsensus.cpp +++ b/src/script/bitcoinconsensus.cpp @@ -119,5 +119,5 @@ int bitcoinconsensus_verify_script(const unsigned char *scriptPubKey, unsigned i unsigned int bitcoinconsensus_version() { // Just use the API version for now - return BITCOINCONSENSUS_API_VER; + return NAVCOINCONSENSUS_API_VER; } diff --git a/src/script/bitcoinconsensus.h b/src/script/bitcoinconsensus.h index f2f2ff8686eee..48a4c90bff73f 100644 --- a/src/script/bitcoinconsensus.h +++ b/src/script/bitcoinconsensus.h @@ -8,8 +8,8 @@ #include -#if defined(BUILD_BITCOIN_INTERNAL) && defined(HAVE_CONFIG_H) -#include +#if defined(BUILD_NAVCOIN_INTERNAL) && defined(HAVE_CONFIG_H) +#include #if defined(_WIN32) #if defined(HAVE_DLLEXPORT_ATTRIBUTE) #define EXPORT_SYMBOL __declspec(dllexport) @@ -19,7 +19,7 @@ #elif defined(HAVE_DEFAULT_VISIBILITY_ATTRIBUTE) #define EXPORT_SYMBOL __attribute__ ((visibility ("default"))) #endif -#elif defined(MSC_VER) && !defined(STATIC_LIBBITCOINCONSENSUS) +#elif defined(MSC_VER) && !defined(STATIC_LIBNAVCOINCONSENSUS) #define EXPORT_SYMBOL __declspec(dllimport) #endif @@ -31,7 +31,7 @@ extern "C" { #endif -#define BITCOINCONSENSUS_API_VER 1 +#define NAVCOINCONSENSUS_API_VER 1 typedef enum bitcoinconsensus_error_t { diff --git a/src/script/sign.cpp b/src/script/sign.cpp index 85589fe86b9d3..d73f115420267 100644 --- a/src/script/sign.cpp +++ b/src/script/sign.cpp @@ -515,7 +515,7 @@ bool ProduceSignature(const SigningProvider& provider, const BaseSignatureCreato const auto ms = miniscript::FromScript(witnessscript, ms_satisfier); solved = ms && ms->Satisfy(ms_satisfier, result) == miniscript::Availability::YES; } - result.push_back(std::vector(witnessscript.begin(), witnessscript.end())); + result.emplace_back(std::vector(witnessscript.begin(), witnessscript.end())); sigdata.scriptWitness.stack = result; sigdata.witness = true; @@ -532,7 +532,7 @@ bool ProduceSignature(const SigningProvider& provider, const BaseSignatureCreato if (!sigdata.witness) sigdata.scriptWitness.stack.clear(); if (P2SH) { - result.push_back(std::vector(subscript.begin(), subscript.end())); + result.emplace_back(std::vector(subscript.begin(), subscript.end())); } sigdata.scriptSig = PushAll(result); diff --git a/src/shutdown.cpp b/src/shutdown.cpp index 2fffc0663c7bb..bf73576958441 100644 --- a/src/shutdown.cpp +++ b/src/shutdown.cpp @@ -6,7 +6,7 @@ #include #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp index f92d1d8fb7dd9..4d6cc2b545b7c 100644 --- a/src/support/lockedpool.cpp +++ b/src/support/lockedpool.cpp @@ -6,7 +6,7 @@ #include #if defined(HAVE_CONFIG_H) -#include +#include #endif #ifdef WIN32 diff --git a/src/sync.cpp b/src/sync.cpp index 46218056539be..74971b46a54e4 100644 --- a/src/sync.cpp +++ b/src/sync.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
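The libbitcoinconsensus header above keeps the API-version handshake intact while renaming the constant to `NAVCOINCONSENSUS_API_VER`; the fuzz target further down asserts on exactly this. A self-contained sketch of that check, with the library function stubbed out so it runs on its own (real code includes `script/bitcoinconsensus.h` and links the library instead):

```cpp
#include <cassert>

// Stand-ins so the sketch compiles on its own; both names exist in the
// header/library shown in this diff.
#define NAVCOINCONSENSUS_API_VER 1
unsigned int bitcoinconsensus_version() { return NAVCOINCONSENSUS_API_VER; } // stub

int main()
{
    // Callers are expected to verify that the library they loaded speaks the
    // same API version as the header they compiled against.
    assert(bitcoinconsensus_version() == NAVCOINCONSENSUS_API_VER);
    return 0;
}
```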
#if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/test/blsct/wallet/address_tests.cpp b/src/test/blsct/wallet/address_tests.cpp index 5fbd00738bb2a..6c670774dcae5 100644 --- a/src/test/blsct/wallet/address_tests.cpp +++ b/src/test/blsct/wallet/address_tests.cpp @@ -31,8 +31,8 @@ BOOST_FIXTURE_TEST_CASE(address_test, BasicTestingSetup) blsct::PublicKey viewKey; blsct::PublicKey spendKey; - BOOST_ASSERT(subAddressDoubleKey.GetViewKey(viewKey)); - BOOST_ASSERT(subAddressDoubleKey.GetSpendKey(spendKey)); + Assert(subAddressDoubleKey.GetViewKey(viewKey)); + Assert(subAddressDoubleKey.GetSpendKey(spendKey)); BOOST_CHECK(viewKey.ToString() == "809ad665b3de4e1d44d835f1b8de36aafeea3279871aeceb56dbdda90c0426c022e8a6dda7313dc5e4c1817287805e3b"); diff --git a/src/test/blsct/wallet/chain_tests.cpp b/src/test/blsct/wallet/chain_tests.cpp index aeb0857b0b8fa..3661cf90629bc 100644 --- a/src/test/blsct/wallet/chain_tests.cpp +++ b/src/test/blsct/wallet/chain_tests.cpp @@ -41,12 +41,12 @@ BOOST_FIXTURE_TEST_CASE(SyncTest, TestBLSCTChain100Setup) auto available_coins = AvailableCoins(*wallet); std::vector coins = available_coins.All(); - BOOST_ASSERT(coins.size() == 1); + Assert(coins.size() == 1); // Create Transaction sending to another address auto tx = blsct::TxFactory::CreateTransaction(wallet.get(), wallet->GetOrCreateBLSCTKeyMan(), blsct::SubAddress(), 1 * COIN, "test"); - BOOST_ASSERT(tx != std::nullopt); + Assert(tx != std::nullopt); auto block = CreateAndProcessBlock({tx.value()}, walletDestination); diff --git a/src/test/blsct/wallet/txfactory_tests.cpp b/src/test/blsct/wallet/txfactory_tests.cpp index a3aca5ad37041..e2c9f33c7465c 100644 --- a/src/test/blsct/wallet/txfactory_tests.cpp +++ b/src/test/blsct/wallet/txfactory_tests.cpp @@ -29,12 +29,12 @@ BOOST_FIXTURE_TEST_CASE(ismine_test, TestingSetup) auto recvAddress = std::get(blsct_km->GetNewDestination(0).value()); auto out = blsct::CreateOutput(recvAddress, 1000, "test"); - BOOST_ASSERT(blsct_km->IsMine(out.out)); + Assert(blsct_km->IsMine(out.out)); auto hashId = blsct_km->GetHashId(out.out); blsct::SubAddress subAddressId; - BOOST_ASSERT(blsct_km->GetSubAddress(hashId, subAddressId)); + Assert(blsct_km->GetSubAddress(hashId, subAddressId)); auto result = blsct_km->RecoverOutputs({out.out}); @@ -75,23 +75,23 @@ BOOST_FIXTURE_TEST_CASE(createtransaction_test, TestingSetup) CCoinsViewCache coins_view_cache{&base, /*deterministic=*/true}; coins_view_cache.SetBestBlock(InsecureRand256()); coins_view_cache.AddCoin(outpoint, std::move(coin), true); - BOOST_ASSERT(coins_view_cache.Flush()); + Assert(coins_view_cache.Flush()); } CCoinsViewCache coins_view_cache{&base, /*deterministic=*/true}; - BOOST_ASSERT(tx.AddInput(coins_view_cache, outpoint)); + Assert(tx.AddInput(coins_view_cache, outpoint)); tx.AddOutput(recvAddress, 900 * COIN, "test"); auto finalTx = tx.BuildTx(); - BOOST_ASSERT(finalTx.has_value()); - BOOST_ASSERT(blsct::VerifyTx(CTransaction(finalTx.value()), coins_view_cache)); + Assert(finalTx.has_value()); + Assert(blsct::VerifyTx(CTransaction(finalTx.value()), coins_view_cache)); bool fFoundChange = false; // Wallet does not have the coins available yet - BOOST_ASSERT(blsct::TxFactory::CreateTransaction(wallet, wallet->GetOrCreateBLSCTKeyMan(), recvAddress, 900 * COIN, "test") == std::nullopt); + Assert(blsct::TxFactory::CreateTransaction(wallet, wallet->GetOrCreateBLSCTKeyMan(), recvAddress, 900 * COIN, "test") == std::nullopt); auto result = blsct_km->RecoverOutputs(finalTx.value().vout); @@ -99,12 
+99,12 @@ BOOST_FIXTURE_TEST_CASE(createtransaction_test, TestingSetup) if (res.message == "Change" && res.amount == (1000 - 900 - 0.006) * COIN) fFoundChange = true; } - BOOST_ASSERT(fFoundChange); + Assert(fFoundChange); wallet->transactionAddedToMempool(MakeTransactionRef(finalTx.value())); // Wallet does not have the coins available yet (not confirmed in block) - BOOST_ASSERT(blsct::TxFactory::CreateTransaction(wallet, wallet->GetOrCreateBLSCTKeyMan(), recvAddress, 900 * COIN, "test") == std::nullopt); + Assert(blsct::TxFactory::CreateTransaction(wallet, wallet->GetOrCreateBLSCTKeyMan(), recvAddress, 900 * COIN, "test") == std::nullopt); } BOOST_FIXTURE_TEST_CASE(addinput_test, TestingSetup) @@ -137,18 +137,18 @@ BOOST_FIXTURE_TEST_CASE(addinput_test, TestingSetup) CCoinsViewCache coins_view_cache{&base, /*deterministic=*/true}; coins_view_cache.SetBestBlock(InsecureRand256()); coins_view_cache.AddCoin(outpoint, std::move(coin), true); - BOOST_ASSERT(coins_view_cache.Flush()); + Assert(coins_view_cache.Flush()); } CCoinsViewCache coins_view_cache{&base, /*deterministic=*/true}; - BOOST_ASSERT(tx.AddInput(coins_view_cache, outpoint)); + Assert(tx.AddInput(coins_view_cache, outpoint)); tx.AddOutput(recvAddress, 900 * COIN, "test"); auto finalTx = tx.BuildTx(); - BOOST_ASSERT(finalTx.has_value()); - BOOST_ASSERT(blsct::VerifyTx(CTransaction(finalTx.value()), coins_view_cache)); + Assert(finalTx.has_value()); + Assert(blsct::VerifyTx(CTransaction(finalTx.value()), coins_view_cache)); bool fFoundChange = false; @@ -158,12 +158,12 @@ BOOST_FIXTURE_TEST_CASE(addinput_test, TestingSetup) if (res.message == "Change" && res.amount == (1000 - 900 - 0.006) * COIN) fFoundChange = true; } - BOOST_ASSERT(fFoundChange); + Assert(fFoundChange); wallet->transactionAddedToMempool(MakeTransactionRef(finalTx.value())); auto wtx = wallet->GetWalletTx(finalTx.value().GetHash()); - BOOST_ASSERT(wtx != nullptr); + Assert(wtx != nullptr); fFoundChange = false; uint32_t nChangePosition = 0; @@ -176,7 +176,7 @@ BOOST_FIXTURE_TEST_CASE(addinput_test, TestingSetup) } } - BOOST_ASSERT(fFoundChange); + Assert(fFoundChange); auto tx2 = blsct::TxFactory(blsct_km); auto outpoint2 = COutPoint(finalTx.value().GetHash(), nChangePosition); @@ -185,7 +185,7 @@ BOOST_FIXTURE_TEST_CASE(addinput_test, TestingSetup) coin2.out = finalTx.value().vout[nChangePosition]; coins_view_cache.AddCoin(outpoint2, std::move(coin2), true); - BOOST_ASSERT(tx2.AddInput(coins_view_cache, outpoint2)); + Assert(tx2.AddInput(coins_view_cache, outpoint2)); blsct::SubAddress randomAddress(blsct::DoublePublicKey(MclG1Point::MapToPoint("test1"), MclG1Point::MapToPoint("test2"))); tx2.AddOutput(randomAddress, 50 * COIN, "test"); @@ -193,8 +193,8 @@ BOOST_FIXTURE_TEST_CASE(addinput_test, TestingSetup) auto finalTx2 = tx2.BuildTx(); wallet->transactionAddedToMempool(MakeTransactionRef(finalTx2.value())); - BOOST_ASSERT(wallet->GetDebit(CTransaction(finalTx2.value()), wallet::ISMINE_SPENDABLE_BLSCT) == (1000 - 900 - 0.006) * COIN); - BOOST_ASSERT(TxGetCredit(*wallet, CTransaction(finalTx2.value()), wallet::ISMINE_SPENDABLE_BLSCT) == (1000 - 900 - 0.006 - 50 - 0.006) * COIN); + Assert(wallet->GetDebit(CTransaction(finalTx2.value()), wallet::ISMINE_SPENDABLE_BLSCT) == (1000 - 900 - 0.006) * COIN); + Assert(TxGetCredit(*wallet, CTransaction(finalTx2.value()), wallet::ISMINE_SPENDABLE_BLSCT) == (1000 - 900 - 0.006 - 50 - 0.006) * COIN); } BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file diff --git a/src/test/blsct/wallet/validation_tests.cpp 
b/src/test/blsct/wallet/validation_tests.cpp index 9d518a2f18171..2441fb2414737 100644 --- a/src/test/blsct/wallet/validation_tests.cpp +++ b/src/test/blsct/wallet/validation_tests.cpp @@ -45,18 +45,18 @@ BOOST_FIXTURE_TEST_CASE(validation_test, TestingSetup) CCoinsViewCache coins_view_cache{&base, /*deterministic=*/true}; coins_view_cache.SetBestBlock(InsecureRand256()); coins_view_cache.AddCoin(outpoint, std::move(coin), true); - BOOST_ASSERT(coins_view_cache.Flush()); + Assert(coins_view_cache.Flush()); } CCoinsViewCache coins_view_cache{&base, /*deterministic=*/true}; - BOOST_ASSERT(tx.AddInput(coins_view_cache, outpoint)); + Assert(tx.AddInput(coins_view_cache, outpoint)); tx.AddOutput(recvAddress, 900 * COIN, "test"); auto finalTx = tx.BuildTx(); - BOOST_ASSERT(finalTx.has_value()); - BOOST_ASSERT(blsct::VerifyTx(CTransaction(finalTx.value()), coins_view_cache)); + Assert(finalTx.has_value()); + Assert(blsct::VerifyTx(CTransaction(finalTx.value()), coins_view_cache)); } BOOST_FIXTURE_TEST_CASE(validation_reward_test, TestingSetup) @@ -70,8 +70,8 @@ BOOST_FIXTURE_TEST_CASE(validation_reward_test, TestingSetup) tx.vout.push_back(out.out); tx.txSig = out.GetSignature(); - BOOST_ASSERT(!blsct::VerifyTx(CTransaction(tx), coins_view_cache)); - BOOST_ASSERT(blsct::VerifyTx(CTransaction(tx), coins_view_cache, 900 * COIN)); + Assert(!blsct::VerifyTx(CTransaction(tx), coins_view_cache)); + Assert(blsct::VerifyTx(CTransaction(tx), coins_view_cache, 900 * COIN)); } BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file diff --git a/src/test/fuzz/multiplication_overflow.cpp b/src/test/fuzz/multiplication_overflow.cpp index fbe4d061bfb13..c8b673b8751fd 100644 --- a/src/test/fuzz/multiplication_overflow.cpp +++ b/src/test/fuzz/multiplication_overflow.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/test/fuzz/script_bitcoin_consensus.cpp b/src/test/fuzz/script_bitcoin_consensus.cpp index fcd66b234e147..19819b6e3ef5f 100644 --- a/src/test/fuzz/script_bitcoin_consensus.cpp +++ b/src/test/fuzz/script_bitcoin_consensus.cpp @@ -22,7 +22,7 @@ FUZZ_TARGET(script_bitcoin_consensus) bitcoinconsensus_error* err_p = fuzzed_data_provider.ConsumeBool() ? &err : nullptr; const unsigned int n_in = fuzzed_data_provider.ConsumeIntegral(); const unsigned int flags = fuzzed_data_provider.ConsumeIntegral(); - assert(bitcoinconsensus_version() == BITCOINCONSENSUS_API_VER); + assert(bitcoinconsensus_version() == NAVCOINCONSENSUS_API_VER); if ((flags & SCRIPT_VERIFY_WITNESS) != 0 && (flags & SCRIPT_VERIFY_P2SH) == 0) { return; } diff --git a/src/test/miniscript_tests.cpp b/src/test/miniscript_tests.cpp index 42e441c41a9a0..b8e638ac15635 100644 --- a/src/test/miniscript_tests.cpp +++ b/src/test/miniscript_tests.cpp @@ -288,12 +288,12 @@ void TestSatisfy(const std::string& testcase, const NodeRef& node) { const CScript script_pubkey = CScript() << OP_0 << WitnessV0ScriptHash(script); CScriptWitness witness_mal; const bool mal_success = node->Satisfy(satisfier, witness_mal.stack, false) == miniscript::Availability::YES; - witness_mal.stack.push_back(std::vector(script.begin(), script.end())); + witness_mal.stack.emplace_back(std::vector(script.begin(), script.end())); // Run non-malleable satisfaction algorithm. 
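The BLSCT wallet test hunks above swap `BOOST_ASSERT` for `Assert`; the new lint rule near the end of this diff gives the rationale (`BOOST_ASSERT` pulls in `boost/assert.hpp` for no benefit). A minimal sketch of the replacement, assuming `Assert` behaves like the `util/check.h` helper — an always-on check that aborts on failure and passes its argument through:

```cpp
#include <cstdio>
#include <cstdlib>
#include <optional>
#include <utility>

// Stand-in for Assert() from util/check.h, assumed to be an always-on check
// that aborts on failure and returns its argument.
template <typename T>
T&& AssertSketch(T&& val, const char* expr)
{
    if (!val) {
        std::fprintf(stderr, "Assertion failed: %s\n", expr);
        std::abort();
    }
    return std::forward<T>(val);
}
#define Assert(val) AssertSketch(val, #val)

int main()
{
    std::optional<int> maybe_fee{42};

    // Where a test previously wrote BOOST_ASSERT(maybe_fee.has_value()), the
    // same hard guarantee is available without the Boost header, and because
    // Assert() yields its argument it can also be used inline:
    const int fee = *Assert(maybe_fee);
    return fee == 42 ? 0 : 1;
}
```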
CScriptWitness witness_nonmal; const bool nonmal_success = node->Satisfy(satisfier, witness_nonmal.stack, true) == miniscript::Availability::YES; - witness_nonmal.stack.push_back(std::vector(script.begin(), script.end())); + witness_nonmal.stack.emplace_back(std::vector(script.begin(), script.end())); if (nonmal_success) { // Non-malleable satisfactions are bounded by GetStackSize(). diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index a8d23b97abd90..033da1330825a 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index d1ce8b8e430aa..c1c04bfe1465d 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -1023,10 +1023,10 @@ BOOST_AUTO_TEST_CASE(test_FormatParagraph) BOOST_AUTO_TEST_CASE(test_FormatSubVersion) { std::vector comments; - comments.push_back(std::string("comment1")); + comments.emplace_back(std::string("comment1")); std::vector comments2; - comments2.push_back(std::string("comment1")); - comments2.push_back(SanitizeString(std::string("Comment2; .,_?@-; !\"#$%&'()*+/<=>[]\\^`{|}~"), SAFE_CHARS_UA_COMMENT)); // Semicolon is discouraged but not forbidden by BIP-0014 + comments2.emplace_back(std::string("comment1")); + comments2.emplace_back(SanitizeString(std::string("Comment2; .,_?@-; !\"#$%&'()*+/<=>[]\\^`{|}~"), SAFE_CHARS_UA_COMMENT)); // Semicolon is discouraged but not forbidden by BIP-0014 BOOST_CHECK_EQUAL(FormatSubVersion("Test", 99900, std::vector()),std::string("/Test:9.99.0/")); BOOST_CHECK_EQUAL(FormatSubVersion("Test", 99900, comments),std::string("/Test:9.99.0(comment1)/")); BOOST_CHECK_EQUAL(FormatSubVersion("Test", 99900, comments2),std::string("/Test:9.99.0(comment1; Comment2; .,_?@-; )/")); diff --git a/src/test/util_threadnames_tests.cpp b/src/test/util_threadnames_tests.cpp index ae913939e8673..e6b6485c5eca4 100644 --- a/src/test/util_threadnames_tests.cpp +++ b/src/test/util_threadnames_tests.cpp @@ -12,7 +12,7 @@ #include #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/timedata.cpp b/src/timedata.cpp index 15ca90ee6a057..8ad19221240b6 100644 --- a/src/timedata.cpp +++ b/src/timedata.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/util/check.cpp b/src/util/check.cpp index 795dce7124821..90cdd5c6db1a8 100644 --- a/src/util/check.cpp +++ b/src/util/check.cpp @@ -5,7 +5,7 @@ #include #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/util/fs_helpers.cpp b/src/util/fs_helpers.cpp index d05cb8a63d7a9..13f4a7af34595 100644 --- a/src/util/fs_helpers.cpp +++ b/src/util/fs_helpers.cpp @@ -6,7 +6,7 @@ #include #if defined(HAVE_CONFIG_H) -#include +#include #endif #include @@ -262,7 +262,7 @@ bool RenameOver(fs::path src, fs::path dest) { #ifdef __MINGW64__ // This is a workaround for a bug in libstdc++ which - // implements std::filesystem::rename with _wrename function. + // implements fs::rename with _wrename function. 
// This bug has been fixed in upstream: // - GCC 10.3: 8dd1c1085587c9f8a21bb5e588dfe1e8cdbba79e // - GCC 11.1: 1dfd95f0a0ca1d9e6cbc00e6cbfd1fa20a98f312 diff --git a/src/util/syscall_sandbox.cpp b/src/util/syscall_sandbox.cpp index b1579bdb9cb9e..be65106cc8030 100644 --- a/src/util/syscall_sandbox.cpp +++ b/src/util/syscall_sandbox.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) -#include +#include #endif // defined(HAVE_CONFIG_H) #include diff --git a/src/util/syserror.cpp b/src/util/syserror.cpp index 5270f5536608c..f7877a38dcdf2 100644 --- a/src/util/syserror.cpp +++ b/src/util/syserror.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/util/system.h b/src/util/system.h index e2fc3450f695d..9521afd35d345 100644 --- a/src/util/system.h +++ b/src/util/system.h @@ -7,7 +7,7 @@ #define BITCOIN_UTIL_SYSTEM_H #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/util/threadnames.cpp b/src/util/threadnames.cpp index 91883fe4ffdc0..6d1e25db675b2 100644 --- a/src/util/threadnames.cpp +++ b/src/util/threadnames.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/util/time.cpp b/src/util/time.cpp index 5ca9d21f8d02e..a3b44d1629a74 100644 --- a/src/util/time.cpp +++ b/src/util/time.cpp @@ -4,7 +4,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/util/tokenpipe.cpp b/src/util/tokenpipe.cpp index 3c27d5e523105..bcb05ca8b1c18 100644 --- a/src/util/tokenpipe.cpp +++ b/src/util/tokenpipe.cpp @@ -4,7 +4,7 @@ #include #if defined(HAVE_CONFIG_H) -#include +#include #endif #ifndef WIN32 diff --git a/src/validation.h b/src/validation.h index 6d051ce7bb468..3d4d4d7769e06 100644 --- a/src/validation.h +++ b/src/validation.h @@ -7,7 +7,7 @@ #define BITCOIN_VALIDATION_H #if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h index f3a0b011a475b..deb63072c4085 100644 --- a/src/wallet/scriptpubkeyman.h +++ b/src/wallet/scriptpubkeyman.h @@ -246,9 +246,9 @@ class ScriptPubKeyMan /** Prepends the wallet name in logging output to ease debugging in multi-wallet use cases */ template - void WalletLogPrintf(std::string fmt, Params... parameters) const + void WalletLogPrintf(const char* fmt, Params... 
parameters) const { - LogPrintf(("%s " + fmt).c_str(), m_storage.GetDisplayName(), parameters...); + LogPrintf(("%s " + std::string{fmt}).c_str(), m_storage.GetDisplayName(), parameters...); }; /** Watch-only address added */ diff --git a/src/wallet/test/coinselector_tests.cpp b/src/wallet/test/coinselector_tests.cpp index 7f66179517a4c..b96e334425365 100644 --- a/src/wallet/test/coinselector_tests.cpp +++ b/src/wallet/test/coinselector_tests.cpp @@ -431,7 +431,7 @@ BOOST_AUTO_TEST_CASE(bnb_search_test) CAmount selection_target = 16 * CENT; const auto& no_res = SelectCoinsBnB(GroupCoins(available_coins.All(), /*subtract_fee_outputs*/true), selection_target, /*cost_of_change=*/0, MAX_STANDARD_TX_WEIGHT); - BOOST_ASSERT(!no_res); + Assert(!no_res); BOOST_CHECK(util::ErrorString(no_res).original.find("The inputs size exceeds the maximum weight") != std::string::npos); // Now add same coin value with a good size and check that it gets selected diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp index 6ddca2cb9c354..53f48f4cf0747 100644 --- a/src/wallet/test/wallet_tests.cpp +++ b/src/wallet/test/wallet_tests.cpp @@ -1022,7 +1022,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_sync_tx_invalid_state_test, TestingSetup) // Add tx to wallet const auto& op_dest = wallet.GetNewDestination(OutputType::BECH32M, ""); - BOOST_ASSERT(op_dest); + Assert(op_dest); CMutableTransaction mtx; mtx.vout.push_back({COIN, GetScriptForDestination(*op_dest)}); diff --git a/src/wallet/test/walletload_tests.cpp b/src/wallet/test/walletload_tests.cpp index 7b008a29a9917..76bdc86b724c3 100644 --- a/src/wallet/test/walletload_tests.cpp +++ b/src/wallet/test/walletload_tests.cpp @@ -178,7 +178,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_load_verif_crypted_key_checksum, TestingSetup) std::unique_ptr db = get_db(dbs); { CPubKey invalid_key; - BOOST_ASSERT(!invalid_key.IsValid()); + Assert(!invalid_key.IsValid()); const auto key = std::make_pair(DBKeys::CRYPTED_KEY, invalid_key); std::pair, uint256> value; BOOST_CHECK(db->MakeBatch(false)->Write(key, value, /*fOverwrite=*/true)); @@ -305,7 +305,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_load_verif_crypted_blsct, TestingSetup) std::unique_ptr db = get_db(dbs); { CPubKey invalid_key; - BOOST_ASSERT(!invalid_key.IsValid()); + Assert(!invalid_key.IsValid()); const auto key = std::make_pair(DBKeys::CRYPTED_KEY, invalid_key); std::pair, uint256> value; BOOST_CHECK(db->MakeBatch(false)->Write(key, value, /*fOverwrite=*/true)); diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index 13a279eb51993..0a9723d405363 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -899,9 +899,9 @@ class CWallet final : public WalletStorage, public interfaces::Chain::Notificati /** Prepends the wallet name in logging output to ease debugging in multi-wallet use cases */ template - void WalletLogPrintf(std::string fmt, Params... parameters) const + void WalletLogPrintf(const char* fmt, Params... 
parameters) const { - LogPrintf(("%s " + fmt).c_str(), GetDisplayName(), parameters...); + LogPrintf(("%s " + std::string{fmt}).c_str(), GetDisplayName(), parameters...); }; /** Upgrade the wallet */ diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp index 1440a6fc7380c..c0cd8718fd890 100644 --- a/src/wallet/walletdb.cpp +++ b/src/wallet/walletdb.cpp @@ -1514,19 +1514,27 @@ std::unique_ptr MakeDatabase(const fs::path& path, const Databas if (format == DatabaseFormat::SQLITE) { #ifdef USE_SQLITE - return MakeSQLiteDatabase(path, options, status, error); + if constexpr (true) { + return MakeSQLiteDatabase(path, options, status, error); + } else #endif - error = Untranslated(strprintf("Failed to open database path '%s'. Build does not support SQLite database format.", fs::PathToString(path))); - status = DatabaseStatus::FAILED_BAD_FORMAT; - return nullptr; + { + error = Untranslated(strprintf("Failed to open database path '%s'. Build does not support SQLite database format.", fs::PathToString(path))); + status = DatabaseStatus::FAILED_BAD_FORMAT; + return nullptr; + } } #ifdef USE_BDB - return MakeBerkeleyDatabase(path, options, status, error); + if constexpr (true) { + return MakeBerkeleyDatabase(path, options, status, error); + } else #endif - error = Untranslated(strprintf("Failed to open database path '%s'. Build does not support Berkeley DB database format.", fs::PathToString(path))); - status = DatabaseStatus::FAILED_BAD_FORMAT; - return nullptr; + { + error = Untranslated(strprintf("Failed to open database path '%s'. Build does not support Berkeley DB database format.", fs::PathToString(path))); + status = DatabaseStatus::FAILED_BAD_FORMAT; + return nullptr; + } } /** Return object for accessing dummy database with no read/write capabilities. */ diff --git a/src/wallet/wallettool.cpp b/src/wallet/wallettool.cpp index 59c83f7efadcb..b3454ed073ae2 100644 --- a/src/wallet/wallettool.cpp +++ b/src/wallet/wallettool.cpp @@ -3,7 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
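The `MakeDatabase()` hunk above wraps each backend call in `if constexpr (true) { ... } else` inside its `#ifdef`, presumably so the "format not supported" fallback becomes an explicit, never-taken else-branch instead of unreachable statements after a `return` (the diff itself does not state the motivation). A reduced sketch with a hypothetical `HAVE_FOO` flag standing in for `USE_SQLITE`/`USE_BDB`:

```cpp
#include <memory>
#include <string>

struct Database { std::string name; };

#define HAVE_FOO 1 // hypothetical backend flag; remove to exercise the fallback

std::unique_ptr<Database> MakeFooDatabase()
{
    auto db = std::make_unique<Database>();
    db->name = "foo";
    return db;
}

std::unique_ptr<Database> MakeDatabaseSketch(std::string& error)
{
#ifdef HAVE_FOO
    if constexpr (true) {
        return MakeFooDatabase();
    } else
#endif
    {
        // Without HAVE_FOO this block is the whole function body; with it,
        // the block becomes the never-taken else-branch of one statement
        // rather than dead code sitting after an unconditional return.
        error = "Build does not support the foo database format.";
        return nullptr;
    }
}

int main()
{
    std::string err;
    return MakeDatabaseSketch(err) ? 0 : 1;
}
```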
#if defined(HAVE_CONFIG_H) -#include +#include #endif #include diff --git a/test/config.ini.in b/test/config.ini.in index 5888ef443b46f..dbb6773cf4018 100644 --- a/test/config.ini.in +++ b/test/config.ini.in @@ -18,12 +18,11 @@ RPCAUTH=@abs_top_srcdir@/share/rpcauth/rpcauth.py @ENABLE_WALLET_TRUE@ENABLE_WALLET=true @USE_SQLITE_TRUE@USE_SQLITE=true @USE_BDB_TRUE@USE_BDB=true -@BUILD_BITCOIN_CLI_TRUE@ENABLE_CLI=true -@BUILD_BITCOIN_UTIL_TRUE@ENABLE_BITCOIN_UTIL=true -@BUILD_BITCOIN_WALLET_TRUE@ENABLE_WALLET_TOOL=true -@BUILD_BITCOIND_TRUE@ENABLE_BITCOIND=true -@ENABLE_FUZZ_TRUE@ENABLE_FUZZ=true +@BUILD_NAVCOIN_CLI_TRUE@ENABLE_CLI=true +@BUILD_NAVCOIN_UTIL_TRUE@ENABLE_NAVCOIN_UTIL=true +@BUILD_NAVCOIN_WALLET_TRUE@ENABLE_WALLET_TOOL=true +@BUILD_NAVCOIND_TRUE@ENABLE_NAVCOIND=true +@ENABLE_FUZZ_BINARY_TRUE@ENABLE_FUZZ_BINARY=true @ENABLE_ZMQ_TRUE@ENABLE_ZMQ=true @ENABLE_EXTERNAL_SIGNER_TRUE@ENABLE_EXTERNAL_SIGNER=true -@ENABLE_SYSCALL_SANDBOX_TRUE@ENABLE_SYSCALL_SANDBOX=true @ENABLE_USDT_TRACEPOINTS_TRUE@ENABLE_USDT_TRACEPOINTS=true diff --git a/test/functional/interface_usdt_utxocache.py b/test/functional/interface_usdt_utxocache.py index 223fba86caebe..49f1b889133b0 100755 --- a/test/functional/interface_usdt_utxocache.py +++ b/test/functional/interface_usdt_utxocache.py @@ -363,7 +363,7 @@ def handle_utxocache_flush(_, data, __): bpf["utxocache_flush"].open_perf_buffer(handle_utxocache_flush) self.log.info("stop the node to flush the UTXO cache") - UTXOS_IN_CACHE = 2 # might need to be changed if the eariler tests are modified + UTXOS_IN_CACHE = 2 # might need to be changed if the earlier tests are modified # A node shutdown causes two flushes. One that flushes UTXOS_IN_CACHE # UTXOs and one that flushes 0 UTXOs. Normally the 0-UTXO-flush is the # second flush, however it can happen that the order changes. diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 06d1ef9ce419c..3f960767c793b 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -970,7 +970,7 @@ def is_wallet_tool_compiled(self): def is_bitcoin_util_compiled(self): """Checks whether navcoin-util was compiled.""" - return self.config["components"].getboolean("ENABLE_BITCOIN_UTIL") + return self.config["components"].getboolean("ENABLE_NAVCOIN_UTIL") def is_zmq_compiled(self): """Checks whether the zmq module was compiled.""" diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 45b57ca7a8817..a9bff969fe6b6 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -431,7 +431,7 @@ def main(): logging.debug("Temporary test directory at %s" % tmpdir) - enable_navcoind = config["components"].getboolean("ENABLE_BITCOIND") + enable_navcoind = config["components"].getboolean("ENABLE_NAVCOIND") if not enable_navcoind: print("No functional tests to run.") diff --git a/test/fuzz/test_runner.py b/test/fuzz/test_runner.py index af21e7b956c23..e72977fac06d4 100755 --- a/test/fuzz/test_runner.py +++ b/test/fuzz/test_runner.py @@ -1,11 +1,12 @@ #!/usr/bin/env python3 -# Copyright (c) 2019-2021 The Bitcoin Core developers +# Copyright (c) 2019-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Run fuzz test targets. 
""" from concurrent.futures import ThreadPoolExecutor, as_completed +from pathlib import Path import argparse import configparser import logging @@ -15,12 +16,14 @@ def get_fuzz_env(*, target, source_dir): + symbolizer = os.environ.get('LLVM_SYMBOLIZER_PATH', "/usr/bin/llvm-symbolizer") return { 'FUZZ': target, 'UBSAN_OPTIONS': f'suppressions={source_dir}/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1', - 'ASAN_OPTIONS': # symbolizer disabled due to https://github.com/google/sanitizers/issues/1364#issuecomment-761072085 - 'symbolize=0:detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1', + 'UBSAN_SYMBOLIZER_PATH':symbolizer, + "ASAN_OPTIONS": "detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1", + 'ASAN_SYMBOLIZER_PATH':symbolizer, } @@ -41,6 +44,11 @@ def main(): action='store_true', help='If true, run fuzzing binaries under the valgrind memory error detector', ) + parser.add_argument( + "--empty_min_time", + type=int, + help="If set, run at least this long, if the existing fuzz inputs directory is empty.", + ) parser.add_argument( '-x', '--exclude', @@ -64,7 +72,8 @@ def main(): ) parser.add_argument( '--m_dir', - help='Merge inputs from this directory into the corpus_dir.', + action="append", + help="Merge inputs from these directories into the corpus_dir.", ) parser.add_argument( '-g', @@ -76,6 +85,7 @@ def main(): ) args = parser.parse_args() + args.corpus_dir = Path(args.corpus_dir) # Set up logging logging.basicConfig( @@ -88,8 +98,8 @@ def main(): configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini" config.read_file(open(configfile, encoding="utf8")) - if not config["components"].getboolean("ENABLE_FUZZ"): - logging.error("Must have fuzz targets built") + if not config["components"].getboolean("ENABLE_FUZZ_BINARY"): + logging.error("Must have fuzz executable built") sys.exit(1) # Build list of tests @@ -141,11 +151,12 @@ def main(): ], env=get_fuzz_env(target=test_list_selection[0], source_dir=config['environment']['SRCDIR']), timeout=20, - check=True, + check=False, stderr=subprocess.PIPE, text=True, ).stderr - if "libFuzzer" not in help_output: + using_libfuzzer = "libFuzzer" in help_output + if (args.generate or args.m_dir) and not using_libfuzzer: logging.error("Must be built with libFuzzer") sys.exit(1) except subprocess.TimeoutExpired: @@ -169,7 +180,7 @@ def main(): test_list=test_list_selection, src_dir=config['environment']['SRCDIR'], build_dir=config["environment"]["BUILDDIR"], - merge_dir=args.m_dir, + merge_dirs=[Path(m_dir) for m_dir in args.m_dir], ) return @@ -179,10 +190,48 @@ def main(): test_list=test_list_selection, src_dir=config['environment']['SRCDIR'], build_dir=config["environment"]["BUILDDIR"], + using_libfuzzer=using_libfuzzer, use_valgrind=args.valgrind, + empty_min_time=args.empty_min_time, ) +def transform_process_message_target(targets, src_dir): + """Add a target per process message, and also keep ("process_message", {}) to allow for + cross-pollination, or unlimited search""" + + p2p_msg_target = "process_message" + if (p2p_msg_target, {}) in targets: + lines = subprocess.run( + ["git", "grep", "--function-context", "g_all_net_message_types{", src_dir / "src" / "protocol.cpp"], + check=True, + stdout=subprocess.PIPE, + text=True, + ).stdout.splitlines() + lines = [l.split("::", 1)[1].split(",")[0].lower() for l in lines if l.startswith("src/protocol.cpp- NetMsgType::")] + assert len(lines) + targets += [(p2p_msg_target, 
{"LIMIT_TO_MESSAGE_TYPE": m}) for m in lines] + return targets + + +def transform_rpc_target(targets, src_dir): + """Add a target per RPC command, and also keep ("rpc", {}) to allow for cross-pollination, + or unlimited search""" + + rpc_target = "rpc" + if (rpc_target, {}) in targets: + lines = subprocess.run( + ["git", "grep", "--function-context", "RPC_COMMANDS_SAFE_FOR_FUZZING{", src_dir / "src" / "test" / "fuzz" / "rpc.cpp"], + check=True, + stdout=subprocess.PIPE, + text=True, + ).stdout.splitlines() + lines = [l.split("\"", 1)[1].split("\"")[0] for l in lines if l.startswith("src/test/fuzz/rpc.cpp- \"")] + assert len(lines) + targets += [(rpc_target, {"LIMIT_TO_RPC_COMMAND": r}) for r in lines] + return targets + + def generate_corpus(*, fuzz_pool, src_dir, build_dir, corpus_dir, targets): """Generates new corpus. @@ -190,49 +239,65 @@ def generate_corpus(*, fuzz_pool, src_dir, build_dir, corpus_dir, targets): {corpus_dir}. """ logging.info("Generating corpus to {}".format(corpus_dir)) + targets = [(t, {}) for t in targets] # expand to add dictionary for target-specific env variables + targets = transform_process_message_target(targets, Path(src_dir)) + targets = transform_rpc_target(targets, Path(src_dir)) - def job(command, t): - logging.debug("Running '{}'\n".format(" ".join(command))) + def job(command, t, t_env): + logging.debug(f"Running '{command}'") logging.debug("Command '{}' output:\n'{}'\n".format( - ' '.join(command), + command, subprocess.run( command, - env=get_fuzz_env(target=t, source_dir=src_dir), + env={ + **t_env, + **get_fuzz_env(target=t, source_dir=src_dir), + }, check=True, stderr=subprocess.PIPE, text=True, - ).stderr)) + ).stderr, + )) futures = [] - for target in targets: - target_corpus_dir = os.path.join(corpus_dir, target) + for target, t_env in targets: + target_corpus_dir = corpus_dir / target os.makedirs(target_corpus_dir, exist_ok=True) command = [ os.path.join(build_dir, 'src', 'test', 'fuzz', 'fuzz'), "-runs=100000", target_corpus_dir, ] - futures.append(fuzz_pool.submit(job, command, target)) + futures.append(fuzz_pool.submit(job, command, target, t_env)) for future in as_completed(futures): future.result() -def merge_inputs(*, fuzz_pool, corpus, test_list, src_dir, build_dir, merge_dir): - logging.info("Merge the inputs from the passed dir into the corpus_dir. Passed dir {}".format(merge_dir)) +def merge_inputs(*, fuzz_pool, corpus, test_list, src_dir, build_dir, merge_dirs): + logging.info(f"Merge the inputs from the passed dir into the corpus_dir. Passed dirs {merge_dirs}") jobs = [] for t in test_list: args = [ os.path.join(build_dir, 'src', 'test', 'fuzz', 'fuzz'), - '-merge=1', + '-rss_limit_mb=8000', + '-set_cover_merge=1', + # set_cover_merge is used instead of -merge=1 to reduce the overall + # size of the qa-assets git repository a bit, but more importantly, + # to cut the runtime to iterate over all fuzz inputs [0]. + # [0] https://github.com/bitcoin-core/qa-assets/issues/130#issuecomment-1761760866 '-shuffle=0', '-prefer_small=1', - '-use_value_profile=1', # Also done by oss-fuzz https://github.com/google/oss-fuzz/issues/1406#issuecomment-387790487 + '-use_value_profile=0', + # use_value_profile is enabled by oss-fuzz [0], but disabled for + # now to avoid bloating the qa-assets git repository [1]. 
+ # [0] https://github.com/google/oss-fuzz/issues/1406#issuecomment-387790487 + # [1] https://github.com/bitcoin-core/qa-assets/issues/130#issuecomment-1749075891 os.path.join(corpus, t), - os.path.join(merge_dir, t), - ] + ] + [str(m_dir / t) for m_dir in merge_dirs] os.makedirs(os.path.join(corpus, t), exist_ok=True) - os.makedirs(os.path.join(merge_dir, t), exist_ok=True) + for m_dir in merge_dirs: + (m_dir / t).mkdir(exist_ok=True) def job(t, args): output = 'Run {} with args {}\n'.format(t, " ".join(args)) @@ -251,16 +316,25 @@ def job(t, args): future.result() -def run_once(*, fuzz_pool, corpus, test_list, src_dir, build_dir, use_valgrind): +def run_once(*, fuzz_pool, corpus, test_list, src_dir, build_dir, using_libfuzzer, use_valgrind, empty_min_time): jobs = [] for t in test_list: - corpus_path = os.path.join(corpus, t) + corpus_path = corpus / t os.makedirs(corpus_path, exist_ok=True) args = [ os.path.join(build_dir, 'src', 'test', 'fuzz', 'fuzz'), - '-runs=1', - corpus_path, ] + empty_dir = not any(corpus_path.iterdir()) + if using_libfuzzer: + if empty_min_time and empty_dir: + args += [f"-max_total_time={empty_min_time}"] + else: + args += [ + "-runs=1", + corpus_path, + ] + else: + args += [corpus_path] if use_valgrind: args = ['valgrind', '--quiet', '--error-exitcode=1'] + args @@ -287,7 +361,7 @@ def job(t, args): logging.info(e.stdout) if e.stderr: logging.info(e.stderr) - logging.info("Target \"{}\" failed with exit code {}".format(" ".join(result.args), e.returncode)) + logging.info(f"Target {result.args} failed with exit code {e.returncode}") sys.exit(1) diff --git a/test/lint/README.md b/test/lint/README.md index 704922d7abe33..1fba41d9eaa39 100644 --- a/test/lint/README.md +++ b/test/lint/README.md @@ -7,16 +7,44 @@ To run linters locally with the same versions as the CI environment, use the inc Dockerfile: ```sh -cd ./ci/lint -docker build -t bitcoin-linter . +DOCKER_BUILDKIT=1 docker build -t bitcoin-linter --file "./ci/lint_imagefile" ./ && docker run --rm -v $(pwd):/bitcoin -it bitcoin-linter +``` + +Building the container can be done every time, because it is fast when the +result is cached and it prevents issues when the image changes. + +test runner +=========== -cd /root/of/bitcoin/repo -docker run --rm -v $(pwd):/bitcoin -it bitcoin-linter +To run all the lint checks in the test runner outside the docker, use: + +```sh +( cd ./test/lint/test_runner/ && cargo fmt && cargo clippy && cargo run ) ``` -After building the container once, you can simply run the last command any time you -want to lint. +#### Dependencies + +| Lint test | Dependency | +|-----------|:----------:| +| [`lint-python.py`](lint/lint-python.py) | [flake8](https://gitlab.com/pycqa/flake8) +| [`lint-python.py`](lint/lint-python.py) | [lief](https://github.com/lief-project/LIEF) +| [`lint-python.py`](lint/lint-python.py) | [mypy](https://github.com/python/mypy) +| [`lint-python.py`](lint/lint-python.py) | [pyzmq](https://github.com/zeromq/pyzmq) +| [`lint-python-dead-code.py`](lint/lint-python-dead-code.py) | [vulture](https://github.com/jendrikseipp/vulture) +| [`lint-shell.py`](lint/lint-shell.py) | [ShellCheck](https://github.com/koalaman/shellcheck) +| [`lint-spelling.py`](lint/lint-spelling.py) | [codespell](https://github.com/codespell-project/codespell) + +In use versions and install instructions are available in the [CI setup](../ci/lint/04_install.sh). +Please be aware that on Linux distributions all dependencies are usually available as packages, but could be outdated. 
+ +#### Running the tests + +Individual tests can be run by directly calling the test script, e.g.: + +``` +test/lint/lint-files.py +``` check-doc.py ============ @@ -55,7 +83,3 @@ To do so, add the upstream repository as remote: ``` git remote add --fetch secp256k1 https://github.com/bitcoin-core/secp256k1.git ``` - -all-lint.py -=========== -Calls other scripts with the `lint-` prefix. diff --git a/test/lint/all-lint.py b/test/lint/all-lint.py deleted file mode 100755 index c7889796c6c1b..0000000000000 --- a/test/lint/all-lint.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (c) 2017-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -# -# This script runs all test/lint/lint-* files, and fails if any exit -# with a non-zero status code. - -from glob import glob -from pathlib import Path -from subprocess import run -from sys import executable - -exit_code = 0 -mod_path = Path(__file__).parent -for lint in glob(f"{mod_path}/lint-*.py"): - result = run([executable, lint]) - if result.returncode != 0: - print(f"^---- failure generated from {lint.split('/')[-1]}") - exit_code |= result.returncode - -exit(exit_code) diff --git a/test/lint/check-doc.py b/test/lint/check-doc.py index d22dd9d9967e9..5a26b4dd324e2 100755 --- a/test/lint/check-doc.py +++ b/test/lint/check-doc.py @@ -23,7 +23,7 @@ CMD_GREP_WALLET_HIDDEN_ARGS = r"git grep --function-context 'void DummyWalletInit::AddWalletOptions' -- {}".format(CMD_ROOT_DIR) CMD_GREP_DOCS = r"git grep --perl-regexp '{}' {}".format(REGEX_DOC, CMD_ROOT_DIR) # list unsupported, deprecated and duplicate args as they need no documentation -SET_DOC_OPTIONAL = set(['-h', '-help', '-dbcrashratio', '-forcecompactdb', '-zapwallettxes']) +SET_DOC_OPTIONAL = set(['-h', '-help', '-dbcrashratio', '-forcecompactdb']) def lint_missing_argument_documentation(): @@ -42,7 +42,7 @@ def lint_missing_argument_documentation(): print("Args unknown : {}".format(len(args_unknown))) print(args_unknown) - assert 0 == len(args_need_doc), "Please document the following arguments: {}".format(args_need_doc) + assert 1 == len(args_need_doc), "Please document the following arguments: {}".format(args_need_doc) def lint_missing_hidden_wallet_args(): diff --git a/test/lint/lint-assertions.py b/test/lint/lint-assertions.py index e7eecebce5954..d9f86b22b8f79 100755 --- a/test/lint/lint-assertions.py +++ b/test/lint/lint-assertions.py @@ -23,20 +23,10 @@ def git_grep(params: [], error_msg: ""): def main(): - # PRE31-C (SEI CERT C Coding Standard): - # "Assertions should not contain assignments, increment, or decrement operators." - exit_code = git_grep([ - "-E", - r"[^_]assert\(.*(\+\+|\-\-|[^=!<>]=[^=!<>]).*\);", - "--", - "*.cpp", - "*.h", - ], "Assertions should not have side effects:") - # Aborting the whole process is undesirable for RPC code. So nonfatal # checks should be used over assert. See: src/util/check.h # src/rpc/server.cpp is excluded from this check since it's mostly meta-code. - exit_code |= git_grep([ + exit_code = git_grep([ "-nE", r"\<(A|a)ss(ume|ert) *\(.*\);", "--", @@ -45,6 +35,16 @@ def main(): ":(exclude)src/rpc/server.cpp", ], "CHECK_NONFATAL(condition) or NONFATAL_UNREACHABLE should be used instead of assert for RPC code.") + # The `BOOST_ASSERT` macro requires to `#include boost/assert.hpp`, + # which is an unnecessary Boost dependency. 
+ exit_code |= git_grep([ + "-E", + r"BOOST_ASSERT *\(.*\);", + "--", + "*.cpp", + "*.h", + ], "BOOST_ASSERT must be replaced with Assert, BOOST_REQUIRE, or BOOST_CHECK.") + sys.exit(exit_code) diff --git a/test/lint/lint-files.py b/test/lint/lint-files.py index 5e45118ad9d83..fb9d91da42e1c 100755 --- a/test/lint/lint-files.py +++ b/test/lint/lint-files.py @@ -11,7 +11,7 @@ import re import sys from subprocess import check_output -from typing import Dict, Optional, NoReturn +from typing import Optional, NoReturn CMD_TOP_LEVEL = ["git", "rev-parse", "--show-toplevel"] CMD_ALL_FILES = ["git", "ls-files", "-z", "--full-name", "--stage"] @@ -70,7 +70,7 @@ def full_extension(self) -> Optional[str]: return None -def get_git_file_metadata() -> Dict[str, FileMeta]: +def get_git_file_metadata() -> dict[str, FileMeta]: ''' Return a dictionary mapping the name of all files in the repository to git tree metadata. ''' diff --git a/test/lint/lint-format-strings.py b/test/lint/lint-format-strings.py index 0176d0a233abc..4bcb8a4614f7e 100755 --- a/test/lint/lint-format-strings.py +++ b/test/lint/lint-format-strings.py @@ -16,10 +16,15 @@ import sys FUNCTION_NAMES_AND_NUMBER_OF_LEADING_ARGUMENTS = [ - 'FatalError,0', + 'FatalErrorf,0', 'fprintf,1', 'tfm::format,1', # Assuming tfm::::format(std::ostream&, ... 'LogConnectFailure,1', + 'LogError,0', + 'LogWarning,0', + 'LogInfo,0', + 'LogDebug,1', + 'LogTrace,1', 'LogPrint,1', 'LogPrintf,0', 'LogPrintfCategory,1', @@ -77,7 +82,7 @@ def main(): matching_files_filtered = [] for matching_file in matching_files: - if not re.search('^src/(bls|leveldb|secp256k1|minisketch|tinyformat|test/fuzz/strprintf.cpp)', matching_file): + if not re.search('^src/(bls|leveldb|secp256k1|minisketch|tinyformat|test/fuzz/strprintf.cpp)|contrib/devtools/bitcoin-tidy/example_logprintf.cpp', matching_file): matching_files_filtered.append(matching_file) matching_files_filtered.sort() diff --git a/test/lint/lint-include-guards.py b/test/lint/lint-include-guards.py index deaa99acac5ea..6b3a0da2ca4db 100755 --- a/test/lint/lint-include-guards.py +++ b/test/lint/lint-include-guards.py @@ -11,13 +11,13 @@ import re import sys from subprocess import check_output -from typing import List HEADER_ID_PREFIX = 'BITCOIN_' HEADER_ID_SUFFIX = '_H' -EXCLUDE_FILES_WITH_PREFIX = ['src/crypto/ctaes', +EXCLUDE_FILES_WITH_PREFIX = ['contrib/devtools/bitcoin-tidy', + 'src/crypto/ctaes', 'src/leveldb', 'src/crc32c', 'src/secp256k1', @@ -28,7 +28,7 @@ 'src/test/fuzz/FuzzedDataProvider.h'] -def _get_header_file_lst() -> List[str]: +def _get_header_file_lst() -> list[str]: """ Helper function to get a list of header filepaths to be checked for include guards. 
""" diff --git a/test/lint/lint-includes.py b/test/lint/lint-includes.py index b8f31abf55096..c361824eda954 100755 --- a/test/lint/lint-includes.py +++ b/test/lint/lint-includes.py @@ -15,7 +15,8 @@ from subprocess import check_output, CalledProcessError -EXCLUDED_DIRS = ["src/leveldb/", +EXCLUDED_DIRS = ["contrib/devtools/bitcoin-tidy/", + "src/leveldb/", "src/bls/", "src/crc32c/", "src/secp256k1/", @@ -32,7 +33,8 @@ "boost/signals2/optional_last_value.hpp", "boost/signals2/signal.hpp", "boost/test/included/unit_test.hpp", - "boost/test/unit_test.hpp"] + "boost/test/unit_test.hpp", + ] def get_toplevel(): diff --git a/test/lint/lint-logs.py b/test/lint/lint-logs.py deleted file mode 100755 index de04a1aecaf5c..0000000000000 --- a/test/lint/lint-logs.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (c) 2018-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -# -# Check that all logs are terminated with '\n' -# -# Some logs are continued over multiple lines. They should be explicitly -# commented with /* Continued */ - -import re -import sys - -from subprocess import check_output - - -def main(): - logs_list = check_output(["git", "grep", "--extended-regexp", r"(LogPrintLevel|LogPrintfCategory|LogPrintf?)\(", "--", "*.cpp"], text=True, encoding="utf8").splitlines() - - unterminated_logs = [line for line in logs_list if not re.search(r'(\\n"|/\* Continued \*/)', line)] - - if unterminated_logs != []: - print("All calls to LogPrintf(), LogPrintfCategory(), LogPrint(), LogPrintLevel(), and WalletLogPrintf() should be terminated with \"\\n\".") - print("") - - for line in unterminated_logs: - print(line) - - sys.exit(1) - - -if __name__ == "__main__": - main() diff --git a/test/lint/lint-python-utf8-encoding.py b/test/lint/lint-python-utf8-encoding.py index 948ad4716a689..f83708e02e035 100755 --- a/test/lint/lint-python-utf8-encoding.py +++ b/test/lint/lint-python-utf8-encoding.py @@ -28,7 +28,7 @@ def check_fileopens(): if e.returncode > 1: raise e - filtered_fileopens = [fileopen for fileopen in fileopens if not re.search(r"encoding=.(ascii|utf8|utf-8).|open\([^,]*, ['\"][^'\"]*b[^'\"]*['\"]", fileopen)] + filtered_fileopens = [fileopen for fileopen in fileopens if not re.search(r"encoding=.(ascii|utf8|utf-8).|open\([^,]*, (\*\*kwargs|['\"][^'\"]*b[^'\"]*['\"])", fileopen)] return filtered_fileopens diff --git a/test/lint/lint-python.py b/test/lint/lint-python.py index c444d93da05ab..87b702f7df1af 100755 --- a/test/lint/lint-python.py +++ b/test/lint/lint-python.py @@ -9,12 +9,17 @@ """ import os -import pkg_resources +from pathlib import Path import subprocess import sys -DEPS = ['flake8', 'mypy', 'pyzmq'] -MYPY_CACHE_DIR = f"{os.getenv('BASE_ROOT_DIR', '')}/test/.mypy_cache" +from importlib.metadata import metadata, PackageNotFoundError + +# Customize mypy cache dir via environment variable +cache_dir = Path(__file__).parent.parent / ".mypy_cache" +os.environ["MYPY_CACHE_DIR"] = str(cache_dir) + +DEPS = ['flake8', 'lief', 'mypy', 'pyzmq'] EXCLUDED_DIRS = ["src/bls/"] # All .py files, except those in src/ (to exclude subtrees there) @@ -100,10 +105,10 @@ def check_dependencies(): - working_set = {pkg.key for pkg in pkg_resources.working_set} - for dep in DEPS: - if dep not in working_set: + try: + metadata(dep) + except PackageNotFoundError: print(f"Skipping Python linting since {dep} is not installed.") exit(0) diff --git a/test/lint/lint-shell.py 
b/test/lint/lint-shell.py index 1b589b3856d10..970c5c9b3220f 100755 --- a/test/lint/lint-shell.py +++ b/test/lint/lint-shell.py @@ -67,9 +67,13 @@ def main(): '*.sh', ] files = get_files(files_cmd) - # remove everything that doesn't match this regex reg = re.compile(r'src/[bls,leveldb,secp256k1,minisketch]') - files[:] = [file for file in files if not reg.match(file)] + + def should_exclude(fname: str) -> bool: + return bool(reg.match(fname)) + + # remove everything that doesn't match this regex + files[:] = [file for file in files if not should_exclude(file)] # build the `shellcheck` command shellcheck_cmd = [ diff --git a/test/lint/run-lint-format-strings.py b/test/lint/run-lint-format-strings.py index 91915f05f9f20..244bf5956f067 100755 --- a/test/lint/run-lint-format-strings.py +++ b/test/lint/run-lint-format-strings.py @@ -14,15 +14,16 @@ FALSE_POSITIVES = [ ("src/dbwrapper.cpp", "vsnprintf(p, limit - p, format, backup_ap)"), - ("src/index/base.cpp", "FatalError(const char* fmt, const Args&... args)"), + ("src/index/base.cpp", "FatalErrorf(const char* fmt, const Args&... args)"), + ("src/index/base.h", "FatalErrorf(const char* fmt, const Args&... args)"), ("src/netbase.cpp", "LogConnectFailure(bool manual_connection, const char* fmt, const Args&... args)"), ("src/clientversion.cpp", "strprintf(_(COPYRIGHT_HOLDERS).translated, COPYRIGHT_HOLDERS_SUBSTITUTION)"), ("src/test/translation_tests.cpp", "strprintf(format, arg)"), ("src/validationinterface.cpp", "LogPrint(BCLog::VALIDATION, fmt \"\\n\", __VA_ARGS__)"), - ("src/wallet/wallet.h", "WalletLogPrintf(std::string fmt, Params... parameters)"), - ("src/wallet/wallet.h", "LogPrintf((\"%s \" + fmt).c_str(), GetDisplayName(), parameters...)"), - ("src/wallet/scriptpubkeyman.h", "WalletLogPrintf(std::string fmt, Params... parameters)"), - ("src/wallet/scriptpubkeyman.h", "LogPrintf((\"%s \" + fmt).c_str(), m_storage.GetDisplayName(), parameters...)"), + ("src/wallet/wallet.h", "WalletLogPrintf(const char* fmt, Params... parameters)"), + ("src/wallet/wallet.h", "LogPrintf((\"%s \" + std::string{fmt}).c_str(), GetDisplayName(), parameters...)"), + ("src/wallet/scriptpubkeyman.h", "WalletLogPrintf(const char* fmt, Params... parameters)"), + ("src/wallet/scriptpubkeyman.h", "LogPrintf((\"%s \" + std::string{fmt}).c_str(), m_storage.GetDisplayName(), parameters...)"), ] @@ -241,20 +242,32 @@ def count_format_specifiers(format_string): 3 >>> count_format_specifiers("foo %d bar %i foo %% foo %*d foo") 4 + >>> count_format_specifiers("foo %5$d") + 5 + >>> count_format_specifiers("foo %5$*7$d") + 7 """ assert type(format_string) is str format_string = format_string.replace('%%', 'X') - n = 0 - in_specifier = False - for i, char in enumerate(format_string): - if char == "%": - in_specifier = True + n = max_pos = 0 + for m in re.finditer("%(.*?)[aAcdeEfFgGinopsuxX]", format_string, re.DOTALL): + # Increase the max position if the argument has a position number like + # "5$", otherwise increment the argument count. + pos_num, = re.match(r"(?:(^\d+)\$)?", m.group(1)).groups() + if pos_num is not None: + max_pos = max(max_pos, int(pos_num)) + else: n += 1 - elif char in "aAcdeEfFgGinopsuxX": - in_specifier = False - elif in_specifier and char == "*": + + # Increase the max position if there is a "*" width argument with a + # position like "*7$", and increment the argument count if there is a + # "*" width argument with no position. 
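In other words, the count is now the larger of (a) the number of specifiers without an explicit position and (b) the highest position referenced by a `%N$` or `*N$` argument. The following is a simplified, self-contained sketch of that rule (illustrative only; the real implementation continues just below), using the docstring examples as checks:

```python
#!/usr/bin/env python3
"""Worked illustration of positional format-specifier counting (sketch only)."""

import re

def count_specifiers_sketch(fmt: str) -> int:
    fmt = fmt.replace("%%", "X")  # literal percent signs consume no argument
    n = max_pos = 0
    for m in re.finditer(r"%(.*?)[aAcdeEfFgGinopsuxX]", fmt, re.DOTALL):
        body = m.group(1)
        pos = re.match(r"(\d+)\$", body)            # e.g. "%5$d"
        if pos:
            max_pos = max(max_pos, int(pos.group(1)))
        else:
            n += 1
        star = re.search(r"\*(?:(\d+)\$)?", body)   # e.g. "%*d" or "%*7$d"
        if star:
            if star.group(1):
                max_pos = max(max_pos, int(star.group(1)))
            else:
                n += 1
    return max(n, max_pos)

# The examples from the docstring above:
assert count_specifiers_sketch("foo %d bar %i foo %% foo %*d foo") == 4
assert count_specifiers_sketch("foo %5$d") == 5
assert count_specifiers_sketch("foo %5$*7$d") == 7
```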
+ star, star_pos_num = re.match(r"(?:.*?(\*(?:(\d+)\$)?)|)", m.group(1)).groups() + if star_pos_num is not None: + max_pos = max(max_pos, int(star_pos_num)) + elif star is not None: n += 1 - return n + return max(n, max_pos) def main(): diff --git a/test/lint/spelling.ignore-words.txt b/test/lint/spelling.ignore-words.txt index d44dd70684ba9..cb0e0c4d31793 100644 --- a/test/lint/spelling.ignore-words.txt +++ b/test/lint/spelling.ignore-words.txt @@ -1,3 +1,4 @@ +afile asend ba blockin @@ -5,15 +6,18 @@ bu cachable clen crypted +debbugs fo fpr hights inflight invokable keypair +lief mor nd nin +requestor ser siz stap diff --git a/test/lint/test_runner/Cargo.lock b/test/lint/test_runner/Cargo.lock new file mode 100644 index 0000000000000..ca83aa93310c7 --- /dev/null +++ b/test/lint/test_runner/Cargo.lock @@ -0,0 +1,7 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "test_runner" +version = "0.1.0" diff --git a/test/lint/test_runner/Cargo.toml b/test/lint/test_runner/Cargo.toml new file mode 100644 index 0000000000000..053ce43d6ce3d --- /dev/null +++ b/test/lint/test_runner/Cargo.toml @@ -0,0 +1,12 @@ +# Copyright (c) The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit/. + +[package] +name = "test_runner" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/test/lint/test_runner/src/main.rs b/test/lint/test_runner/src/main.rs new file mode 100644 index 0000000000000..1dc79e97bdd15 --- /dev/null +++ b/test/lint/test_runner/src/main.rs @@ -0,0 +1,146 @@ +// Copyright (c) The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or https://opensource.org/license/mit/. + +use std::env; +use std::fs; +use std::path::PathBuf; +use std::process::Command; +use std::process::ExitCode; + +type LintError = String; +type LintResult = Result<(), LintError>; +type LintFn = fn() -> LintResult; + +/// Return the git command +fn git() -> Command { + Command::new("git") +} + +/// Return stdout +fn check_output(cmd: &mut std::process::Command) -> Result<String, LintError> { + let out = cmd.output().expect("command error"); + if !out.status.success() { + return Err(String::from_utf8_lossy(&out.stderr).to_string()); + } + Ok(String::from_utf8(out.stdout) + .map_err(|e| format!("{e}"))? + .trim() + .to_string()) + +/// Return the git root as utf8, or panic +fn get_git_root() -> PathBuf { + PathBuf::from(check_output(git().args(["rev-parse", "--show-toplevel"])).unwrap()) +} + +fn lint_subtree() -> LintResult { + // This only checks that the trees are pure subtrees, it is not doing a full + // check with -r to not have to fetch all the remotes. + let mut good = true; + for subtree in [ + "src/crypto/ctaes", + "src/secp256k1", + "src/minisketch", + "src/leveldb", + "src/crc32c", + ] { + good &= Command::new("test/lint/git-subtree-check.sh") + .arg(subtree) + .status() + .expect("command_error") + .success(); + } + if good { + Ok(()) + } else { + Err("".to_string()) + } +} + +fn lint_std_filesystem() -> LintResult { + let found = git() + .args([ + "grep", + "std::filesystem", + "--", + "./src/", + ":(exclude)src/util/fs.h", + ]) + .status() + .expect("command error") + .success(); + if found { + Err(r#" +^^^ +Direct use of std::filesystem may be dangerous and buggy.
Please include <util/fs.h> and use the +fs:: namespace, which has unsafe filesystem functions marked as deleted. + "# + .to_string()) + } else { + Ok(()) + } +} + +fn lint_doc() -> LintResult { + if Command::new("test/lint/check-doc.py") + .status() + .expect("command error") + .success() + { + Ok(()) + } else { + Err("".to_string()) + } +} + +fn lint_all() -> LintResult { + let mut good = true; + let lint_dir = get_git_root().join("test/lint"); + for entry in fs::read_dir(lint_dir).unwrap() { + let entry = entry.unwrap(); + let entry_fn = entry.file_name().into_string().unwrap(); + if entry_fn.starts_with("lint-") + && entry_fn.ends_with(".py") + && !Command::new("python3") + .arg(entry.path()) + .status() + .expect("command error") + .success() + { + good = false; + println!("^---- failure generated from {}", entry_fn); + } + } + if good { + Ok(()) + } else { + Err("".to_string()) + } +} + +fn main() -> ExitCode { + let test_list: Vec<(&str, LintFn)> = vec![ + ("subtree check", lint_subtree), + ("std::filesystem check", lint_std_filesystem), + ("-help=1 documentation check", lint_doc), + ("lint-*.py scripts", lint_all), + ]; + + let git_root = get_git_root(); + + let mut test_failed = false; + for (lint_name, lint_fn) in test_list { + // chdir to root before each lint test + env::set_current_dir(&git_root).unwrap(); + if let Err(err) = lint_fn() { + println!("{err}\n^---- Failure generated from {lint_name}!"); + test_failed = true; + } + } + if test_failed { + ExitCode::FAILURE + } else { + ExitCode::SUCCESS + } +}
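For readers more at home in the existing Python lint scripts, the control flow of the Rust runner above can be summarised with a rough Python analogue. This is an illustrative sketch only, covering just two of the four checks (the subtree and std::filesystem checks are omitted for brevity); the Rust binary, invoked via `cargo run` as described in the README section earlier, is the actual runner.

```python
#!/usr/bin/env python3
"""Rough Python analogue of the control flow in test_runner/src/main.rs (sketch)."""

import os
import subprocess
import sys
from pathlib import Path

def git_root() -> Path:
    out = subprocess.check_output(["git", "rev-parse", "--show-toplevel"], text=True)
    return Path(out.strip())

def lint_doc() -> bool:
    return subprocess.run(["test/lint/check-doc.py"]).returncode == 0

def lint_all() -> bool:
    good = True
    for script in sorted((git_root() / "test/lint").glob("lint-*.py")):
        if subprocess.run([sys.executable, script]).returncode != 0:
            print(f"^---- failure generated from {script.name}")
            good = False
    return good

def main() -> int:
    checks = [
        ("-help=1 documentation check", lint_doc),
        ("lint-*.py scripts", lint_all),
    ]
    failed = False
    for name, fn in checks:
        os.chdir(git_root())  # chdir to the repo root before each check
        if not fn():
            print(f"^---- Failure generated from {name}!")
            failed = True
    return 1 if failed else 0

if __name__ == "__main__":
    sys.exit(main())
```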