diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 43f017aad9791..11c1f681133b6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -65,7 +65,7 @@ jobs: run: clang --version - name: Install Homebrew packages - run: brew install boost libevent qt@5 miniupnpc libnatpmp ccache zeromq qrencode libtool automake gnu-getopt + run: brew install automake libtool pkg-config gnu-getopt ccache boost libevent miniupnpc libnatpmp zeromq qt@5 qrencode - name: Set Ccache directory run: echo "CCACHE_DIR=${RUNNER_TEMP}/ccache_dir" >> "$GITHUB_ENV" diff --git a/.gitignore b/.gitignore index c302a8c5451ad..1f2c0cd540459 100644 --- a/.gitignore +++ b/.gitignore @@ -79,7 +79,7 @@ src/qt/groestlcoin-qt.includes *.log *.trs -*.dmg +*.zip *.json.h *.raw.h diff --git a/Makefile.am b/Makefile.am index 80d87dcc20a26..471d4af9a7684 100644 --- a/Makefile.am +++ b/Makefile.am @@ -37,7 +37,7 @@ space := $(empty) $(empty) OSX_APP=Groestlcoin-Qt.app OSX_VOLNAME = $(subst $(space),-,$(PACKAGE_NAME)) -OSX_DMG = $(OSX_VOLNAME).dmg +OSX_ZIP = $(OSX_VOLNAME).zip OSX_DEPLOY_SCRIPT=$(top_srcdir)/contrib/macdeploy/macdeployqtplus OSX_INSTALLER_ICONS=$(top_srcdir)/src/qt/res/icons/groestlcoin.icns OSX_PLIST=$(top_builddir)/share/qt/Info.plist #not installed @@ -123,15 +123,16 @@ osx_volname: echo $(OSX_VOLNAME) >$@ if BUILD_DARWIN -$(OSX_DMG): $(OSX_APP_BUILT) $(OSX_PACKAGING) - $(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) $(OSX_VOLNAME) -translations-dir=$(QT_TRANSLATION_DIR) -dmg +$(OSX_ZIP): $(OSX_APP_BUILT) $(OSX_PACKAGING) + $(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) $(OSX_VOLNAME) -translations-dir=$(QT_TRANSLATION_DIR) -zip -deploydir: $(OSX_DMG) +deploydir: $(OSX_ZIP) else !BUILD_DARWIN APP_DIST_DIR=$(top_builddir)/dist -$(OSX_DMG): deploydir - $(XORRISOFS) -D -l -V "$(OSX_VOLNAME)" -no-pad -r -dir-mode 0755 -o $@ $(APP_DIST_DIR) -- $(if $(SOURCE_DATE_EPOCH),-volume_date all_file_dates =$(SOURCE_DATE_EPOCH)) +$(OSX_ZIP): deploydir + if [ -n "$(SOURCE_DATE_EPOCH)" ]; then find $(APP_DIST_DIR) -exec touch -d @$(SOURCE_DATE_EPOCH) {} +; fi + cd $(APP_DIST_DIR) && find . 
| sort | $(ZIP) -X@ $@ $(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Groestlcoin-Qt: $(OSX_APP_BUILT) $(OSX_PACKAGING) INSTALL_NAME_TOOL=$(INSTALL_NAME_TOOL) OTOOL=$(OTOOL) STRIP=$(STRIP) $(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) $(OSX_VOLNAME) -translations-dir=$(QT_TRANSLATION_DIR) @@ -139,7 +140,7 @@ $(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Groestlcoin-Qt: $(OSX_APP_BUILT) $(OSX deploydir: $(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Groestlcoin-Qt endif !BUILD_DARWIN -deploy: $(OSX_DMG) +deploy: $(OSX_ZIP) endif $(GROESTLCOIN_QT_BIN): FORCE @@ -312,7 +313,7 @@ EXTRA_DIST += \ test/util/data/txcreatesignv2.hex \ test/util/rpcauth-test.py -CLEANFILES = $(OSX_DMG) $(GROESTLCOIN_WIN_INSTALLER) +CLEANFILES = $(OSX_ZIP) $(GROESTLCOIN_WIN_INSTALLER) DISTCHECK_CONFIGURE_FLAGS = --enable-man diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh index ada9d8d6ff39b..dbd7ca249eb08 100644 --- a/ci/test/00_setup_env_mac.sh +++ b/ci/test/00_setup_env_mac.sh @@ -11,7 +11,7 @@ export SDK_URL=${SDK_URL:-https://bitcoincore.org/depends-sources/sdks} export CONTAINER_NAME=ci_macos_cross export CI_IMAGE_NAME_TAG="docker.io/ubuntu:22.04" export HOST=x86_64-apple-darwin -export PACKAGES="cmake libz-dev python3-setuptools xorriso" +export PACKAGES="cmake libz-dev python3-setuptools zip" export XCODE_VERSION=12.2 export XCODE_BUILD_ID=12B45b export RUN_UNIT_TESTS=false diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh index b05d7547c8dfd..424dca52dc6c0 100755 --- a/ci/test/01_base_install.sh +++ b/ci/test/01_base_install.sh @@ -51,7 +51,7 @@ if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then -DLLVM_ENABLE_RUNTIMES="compiler-rt;libcxx;libcxxabi;libunwind" \ -S /msan/llvm-project/llvm - ninja -C /msan/clang_build/ "$MAKEJOBS" + ninja -C /msan/clang_build/ "-j$( nproc )" # Use nproc, because MAKEJOBS is the default in docker image builds ninja -C /msan/clang_build/ install-runtimes update-alternatives --install /usr/bin/clang++ clang++ /msan/clang_build/bin/clang++ 100 @@ -69,13 +69,13 @@ if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then -DLIBCXX_HARDENING_MODE=debug \ -S /msan/llvm-project/runtimes - ninja -C /msan/cxx_build/ "$MAKEJOBS" + ninja -C /msan/cxx_build/ "-j$( nproc )" # Use nproc, because MAKEJOBS is the default in docker image builds fi if [[ "${RUN_TIDY}" == "true" ]]; then git clone --depth=1 https://github.com/include-what-you-use/include-what-you-use -b clang_16 /include-what-you-use cmake -B /iwyu-build/ -G 'Unix Makefiles' -DCMAKE_PREFIX_PATH=/usr/lib/llvm-16 -S /include-what-you-use - make -C /iwyu-build/ install "$MAKEJOBS" + make -C /iwyu-build/ install "-j$( nproc )" # Use nproc, because MAKEJOBS is the default in docker image builds fi mkdir -p "${DEPENDS_DIR}/SDKs" "${DEPENDS_DIR}/sdk-sources" diff --git a/configure.ac b/configure.ac index a1386211c32cc..94639e1d5f39b 100644 --- a/configure.ac +++ b/configure.ac @@ -799,7 +799,7 @@ case $host in AC_PATH_TOOL([DSYMUTIL], [dsymutil], [dsymutil]) AC_PATH_TOOL([INSTALL_NAME_TOOL], [install_name_tool], [install_name_tool]) AC_PATH_TOOL([OTOOL], [otool], [otool]) - AC_PATH_PROGS([XORRISOFS], [xorrisofs], [xorrisofs]) + AC_PATH_PROG([ZIP], [zip], [zip]) dnl libtool will try to strip the static lib, which is a problem for dnl cross-builds because strip attempts to call a hard-coded ld, @@ -1936,7 +1936,6 @@ AC_CONFIG_FILES([contrib/devtools/split-debug.sh],[chmod +x contrib/devtools/spl AM_COND_IF([HAVE_DOXYGEN], [AC_CONFIG_FILES([doc/Doxyfile])]) 
AC_CONFIG_LINKS([contrib/devtools/iwyu/groestlcoin.core.imp:contrib/devtools/iwyu/groestlcoin.core.imp]) AC_CONFIG_LINKS([contrib/filter-lcov.py:contrib/filter-lcov.py]) -AC_CONFIG_LINKS([contrib/macdeploy/background.tiff:contrib/macdeploy/background.tiff]) AC_CONFIG_LINKS([src/.bear-tidy-config:src/.bear-tidy-config]) AC_CONFIG_LINKS([src/.clang-tidy:src/.clang-tidy]) dnl !!!P AC_CONFIG_LINKS([test/functional/test_runner.py:test/functional/test_runner.py]) diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh index 0f0c589b22bf9..782065636e38d 100755 --- a/contrib/guix/libexec/build.sh +++ b/contrib/guix/libexec/build.sh @@ -315,7 +315,7 @@ mkdir -p "$DISTSRC" | gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}-unsigned.tar.gz" \ || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}-unsigned.tar.gz" && exit 1 ) ) - make deploy ${V:+V=1} OSX_DMG="${OUTDIR}/${DISTNAME}-${HOST}-unsigned.dmg" + make deploy ${V:+V=1} OSX_ZIP="${OUTDIR}/${DISTNAME}-${HOST}-unsigned.zip" ;; esac ( diff --git a/contrib/guix/libexec/codesign.sh b/contrib/guix/libexec/codesign.sh index 82ca52abff99e..f01ade9f9011d 100755 --- a/contrib/guix/libexec/codesign.sh +++ b/contrib/guix/libexec/codesign.sh @@ -85,11 +85,8 @@ mkdir -p "$DISTSRC" # Apply detached codesignatures to dist/ (in-place) signapple apply dist/Groestlcoin-Qt.app codesignatures/osx/dist - # Make a DMG from dist/ - xorrisofs -D -l -V "$(< osx_volname)" -no-pad -r -dir-mode 0755 \ - -o "${OUTDIR}/${DISTNAME}-${HOST}.dmg" \ - dist \ - -- -volume_date all_file_dates ="$SOURCE_DATE_EPOCH" + # Make a .zip from dist/ + zip "${OUTDIR}/${DISTNAME}-${HOST}.zip" dist/* ;; *) exit 1 diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index b99eb5e8309ac..f0b7ebfea8e73 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -3,7 +3,6 @@ ((gnu packages bash) #:select (bash-minimal)) (gnu packages bison) ((gnu packages certs) #:select (nss-certs)) - ((gnu packages cdrom) #:select (xorriso)) ((gnu packages cmake) #:select (cmake-minimal)) (gnu packages commencement) (gnu packages compression) @@ -606,5 +605,5 @@ inspecting signatures in Mach-O binaries.") ((string-contains target "-linux-") (list (make-groestlcoin-cross-toolchain target))) ((string-contains target "darwin") - (list clang-toolchain-15 binutils cmake-minimal xorriso python-signapple)) + (list clang-toolchain-15 binutils cmake-minimal python-signapple zip)) (else '()))))) diff --git a/contrib/macdeploy/README.md b/contrib/macdeploy/README.md index f0a6aa7ff761a..4c970cb61966c 100644 --- a/contrib/macdeploy/README.md +++ b/contrib/macdeploy/README.md @@ -6,7 +6,7 @@ The `macdeployqtplus` script should not be run manually. Instead, after building make deploy ``` -When complete, it will have produced `Groestlcoin-Core.dmg`. +When complete, it will have produced `Groestlcoin-Core.zip`. ## SDK Extraction @@ -60,10 +60,10 @@ previous stage) as the first argument. The `sha256sum` of the generated TAR.GZ archive should be `df75d30ecafc429e905134333aeae56ac65fac67cb4182622398fd717df77619`. -## Deterministic macOS DMG Notes +## Deterministic macOS App Notes -Working macOS DMGs are created in Linux by combining a recent `clang`, the Apple -`binutils` (`ld`, `ar`, etc) and DMG authoring tools. +macOS Applications are created in Linux by combining a recent `clang` and the Apple +`binutils` (`ld`, `ar`, etc). Apple uses `clang` extensively for development and has upstreamed the necessary functionality so that a vanilla clang can take advantage. 
It supports the use of `-F`, @@ -93,20 +93,15 @@ created using these tools. The build process has been designed to avoid includin SDK's files in Guix's outputs. All interim tarballs are fully deterministic and may be freely redistributed. -[`xorrisofs`](https://www.gnu.org/software/xorriso/) is used to create the DMG. - -A background image is added to DMG files by inserting a `.DS_Store` during creation. - As of OS X 10.9 Mavericks, using an Apple-blessed key to sign binaries is a requirement in order to satisfy the new Gatekeeper requirements. Because this private key cannot be shared, we'll have to be a bit creative in order for the build process to remain somewhat deterministic. Here's how it works: -- Builders use Guix to create an unsigned release. This outputs an unsigned DMG which +- Builders use Guix to create an unsigned release. This outputs an unsigned ZIP which users may choose to bless and run. It also outputs an unsigned app structure in the form - of a tarball, which also contains all of the tools that have been previously (deterministically) - built in order to create a final DMG. + of a tarball. - The Apple keyholder uses this unsigned app to create a detached signature, using the script that is also included there. Detached signatures are available from this [repository](https://github.com/groestlcoin/groestlcoin-detached-sigs). - Builders feed the unsigned app + detached signature back into Guix. It uses the - pre-built tools to recombine the pieces into a deterministic DMG. + pre-built tools to recombine the pieces into a deterministic ZIP. diff --git a/contrib/macdeploy/background.tiff b/contrib/macdeploy/background.tiff deleted file mode 100644 index 1fb088c8374ac..0000000000000 Binary files a/contrib/macdeploy/background.tiff and /dev/null differ diff --git a/contrib/macdeploy/macdeployqtplus b/contrib/macdeploy/macdeployqtplus index 8801834f77b48..23e39666b4fea 100644 --- a/contrib/macdeploy/macdeployqtplus +++ b/contrib/macdeploy/macdeployqtplus @@ -18,8 +18,6 @@ import sys, re, os, platform, shutil, stat, subprocess, os.path from argparse import ArgumentParser -from ds_store import DSStore -from mac_alias import Alias from pathlib import Path from subprocess import PIPE, run from typing import List, Optional @@ -385,7 +383,7 @@ def deployPlugins(appBundleInfo: ApplicationBundleInfo, deploymentInfo: Deployme ap = ArgumentParser(description="""Improved version of macdeployqt. -Outputs a ready-to-deploy app in a folder "dist" and optionally wraps it in a .dmg file. +Outputs a ready-to-deploy app in a folder "dist" and optionally wraps it in a .zip file. Note, that the "dist" folder will be deleted before deploying on each run. Optionally, Qt translation files (.qm) can be added to the bundle.""") @@ -395,8 +393,8 @@ ap.add_argument("appname", nargs=1, metavar="appname", help="name of the app bei ap.add_argument("-verbose", nargs="?", const=True, help="Output additional debugging information") ap.add_argument("-no-plugins", dest="plugins", action="store_false", default=True, help="skip plugin deployment") ap.add_argument("-no-strip", dest="strip", action="store_false", default=True, help="don't run 'strip' on the binaries") -ap.add_argument("-dmg", nargs="?", const="", metavar="basename", help="create a .dmg disk image") ap.add_argument("-translations-dir", nargs=1, metavar="path", default=None, help="Path to Qt's translations. 
Base translations will automatically be added to the bundle's resources.") +ap.add_argument("-zip", nargs="?", const="", metavar="zip", help="create a .zip containing the app bundle") config = ap.parse_args() @@ -417,12 +415,9 @@ if os.path.exists("dist"): print("+ Removing existing dist folder +") shutil.rmtree("dist") -if os.path.exists(appname + ".dmg"): - print("+ Removing existing DMG +") - os.unlink(appname + ".dmg") - -if os.path.exists(appname + ".temp.dmg"): - os.unlink(appname + ".temp.dmg") +if os.path.exists(appname + ".zip"): + print("+ Removing existing .zip +") + os.unlink(appname + ".zip") # ------------------------------------------------ @@ -497,99 +492,13 @@ with open(os.path.join(applicationBundle.resourcesPath, "qt.conf"), "wb") as f: # ------------------------------------------------ -print("+ Generating .DS_Store +") - -output_file = os.path.join("dist", ".DS_Store") - -ds = DSStore.open(output_file, 'w+') - -ds['.']['bwsp'] = { - 'WindowBounds': '{{300, 280}, {500, 343}}', - 'PreviewPaneVisibility': False, -} - -icvp = { - 'gridOffsetX': 0.0, - 'textSize': 12.0, - 'viewOptionsVersion': 1, - 'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00', - 'backgroundColorBlue': 1.0, - 'iconSize': 96.0, - 'backgroundColorGreen': 1.0, - 'arrangeBy': 'none', - 'showIconPreview': True, - 'gridSpacing': 100.0, - 'gridOffsetY': 0.0, - 'showItemInfo': False, - 'labelOnBottom': True, - 'backgroundType': 2, - 'backgroundColorRed': 1.0 -} -alias = Alias().from_bytes(icvp['backgroundImageAlias']) -alias.volume.name = appname -alias.volume.posix_path = '/Volumes/' + appname -icvp['backgroundImageAlias'] = alias.to_bytes() -ds['.']['icvp'] = icvp - -ds['.']['vSrn'] = ('long', 1) - -ds['Applications']['Iloc'] = (370, 156) -ds['Groestlcoin-Qt.app']['Iloc'] = (128, 
156) - -ds.flush() -ds.close() - -# ------------------------------------------------ - if platform.system() == "Darwin": subprocess.check_call(f"codesign --deep --force --sign - {target}", shell=True) -print("+ Installing background.tiff +") - -bg_path = os.path.join('dist', '.background', 'background.tiff') -os.mkdir(os.path.dirname(bg_path)) - -tiff_path = os.path.join('contrib', 'macdeploy', 'background.tiff') -shutil.copy2(tiff_path, bg_path) - -# ------------------------------------------------ - -print("+ Generating symlink for /Applications +") - -os.symlink("/Applications", os.path.join('dist', "Applications")) - # ------------------------------------------------ -if config.dmg is not None: - - print("+ Preparing .dmg disk image +") - - if verbose: - print("Determining size of \"dist\"...") - size = 0 - for path, dirs, files in os.walk("dist"): - for file in files: - size += os.path.getsize(os.path.join(path, file)) - size += int(size * 0.15) - - if verbose: - print("Creating temp image for modification...") - - tempname: str = appname + ".temp.dmg" - - run(["hdiutil", "create", tempname, "-srcfolder", "dist", "-format", "UDRW", "-size", str(size), "-volname", appname], check=True, text=True) - - if verbose: - print("Attaching temp image...") - output = run(["hdiutil", "attach", tempname, "-readwrite"], check=True, text=True, stdout=PIPE).stdout - - print("+ Finalizing .dmg disk image +") - - run(["hdiutil", "detach", f"/Volumes/{appname}"], text=True) - - run(["hdiutil", "convert", tempname, "-format", "UDZO", "-o", appname, "-imagekey", "zlib-level=9"], check=True, text=True) - - os.unlink(tempname) +if config.zip is not None: + shutil.make_archive('{}'.format(appname), format='zip', root_dir='dist', base_dir='Groestlcoin-Qt.app') # ------------------------------------------------ diff --git a/depends/README.md b/depends/README.md index c2846a8a65507..cdcf7180da64b 100644 --- a/depends/README.md +++ b/depends/README.md @@ -48,7 +48,7 @@ The paths are automatically configured and no other options are needed unless ta #### For macOS cross compilation - sudo apt-get install curl bsdmainutils cmake libz-dev python3-setuptools xorriso + sudo apt-get install curl bsdmainutils cmake libz-dev python3-setuptools zip Note: You must obtain the macOS SDK before proceeding with a cross-compile. Under the depends directory, create a subdirectory named `SDKs`. 
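
Aside (illustrative only, not part of this patch): the reproducible-`.zip` approach the patch switches to — `touch -d @$SOURCE_DATE_EPOCH` plus `find . | sort | $(ZIP) -X@` in Makefile.am, and `shutil.make_archive` with the `-zip` option in macdeployqtplus — can be summarized in a short, self-contained sketch. The helper below is hypothetical; the `dist/Groestlcoin-Qt.app` path and the `SOURCE_DATE_EPOCH` environment variable follow the patch, but everything else is an assumption for illustration.

```python
#!/usr/bin/env python3
"""Sketch (not project code): build a reproducible .zip of the app bundle by
normalizing timestamps to SOURCE_DATE_EPOCH and sorting the member list,
mirroring the idea behind the Makefile.am rule in this patch."""
import os
import time
import zipfile


def make_reproducible_zip(src_dir: str, out_zip: str, source_date_epoch: int) -> None:
    # The ZIP format cannot store timestamps before 1980-01-01, so clamp.
    date_time = time.gmtime(max(source_date_epoch, 315532800))[:6]

    # Sort the member list so archive order does not depend on filesystem
    # iteration order (the Makefile rule does this with `find . | sort`).
    entries = []
    for root, dirs, files in os.walk(src_dir):
        entries.extend(os.path.join(root, name) for name in dirs + files)
    entries.sort()

    with zipfile.ZipFile(out_zip, "w", zipfile.ZIP_DEFLATED) as zf:
        for path in entries:
            arcname = os.path.relpath(path, os.path.dirname(src_dir))
            info = zipfile.ZipInfo(arcname, date_time=date_time)
            # Preserve Unix permissions (e.g. the executable bit on the
            # Groestlcoin-Qt binary); zipfile keeps the mode in the high
            # 16 bits of external_attr.
            info.external_attr = (os.stat(path).st_mode & 0xFFFF) << 16
            if os.path.isdir(path):
                info.filename += "/"  # directory entries end with a slash
                zf.writestr(info, b"")
            else:
                # Symlink preservation is omitted for brevity; a real tool
                # would store links specially rather than their targets.
                with open(path, "rb") as fh:
                    zf.writestr(info, fh.read())


if __name__ == "__main__":
    epoch = int(os.environ.get("SOURCE_DATE_EPOCH", "315532800"))
    make_reproducible_zip("dist/Groestlcoin-Qt.app", "Groestlcoin-Qt.zip", epoch)
```

Timestamps in a zip come from file mtimes unless they are normalized first, which is why the Makefile rule touches everything to `SOURCE_DATE_EPOCH` before piping the sorted file list into `zip -X@`.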
diff --git a/depends/packages/native_ds_store.mk b/depends/packages/native_ds_store.mk deleted file mode 100644 index 51a95f48ef7b1..0000000000000 --- a/depends/packages/native_ds_store.mk +++ /dev/null @@ -1,15 +0,0 @@ -package=native_ds_store -$(package)_version=1.3.0 -$(package)_download_path=https://github.com/dmgbuild/ds_store/archive/ -$(package)_file_name=v$($(package)_version).tar.gz -$(package)_sha256_hash=76b3280cd4e19e5179defa23fb594a9dd32643b0c80d774bd3108361d94fb46d -$(package)_install_libdir=$(build_prefix)/lib/python3/dist-packages - -define $(package)_build_cmds - python3 setup.py build -endef - -define $(package)_stage_cmds - mkdir -p $($(package)_install_libdir) && \ - python3 setup.py install --root=$($(package)_staging_dir) --prefix=$(build_prefix) --install-lib=$($(package)_install_libdir) -endef diff --git a/depends/packages/native_mac_alias.mk b/depends/packages/native_mac_alias.mk deleted file mode 100644 index ddd631186edf7..0000000000000 --- a/depends/packages/native_mac_alias.mk +++ /dev/null @@ -1,15 +0,0 @@ -package=native_mac_alias -$(package)_version=2.2.0 -$(package)_download_path=https://github.com/dmgbuild/mac_alias/archive/ -$(package)_file_name=v$($(package)_version).tar.gz -$(package)_sha256_hash=421e6d7586d1f155c7db3e7da01ca0dacc9649a509a253ad7077b70174426499 -$(package)_install_libdir=$(build_prefix)/lib/python3/dist-packages - -define $(package)_build_cmds - python3 setup.py build -endef - -define $(package)_stage_cmds - mkdir -p $($(package)_install_libdir) && \ - python3 setup.py install --root=$($(package)_staging_dir) --prefix=$(build_prefix) --install-lib=$($(package)_install_libdir) -endef diff --git a/depends/packages/packages.mk b/depends/packages/packages.mk index b3600b72d0b54..dd55c939cbb1d 100644 --- a/depends/packages/packages.mk +++ b/depends/packages/packages.mk @@ -27,7 +27,7 @@ multiprocess_native_packages = native_libmultiprocess native_capnp usdt_linux_packages=systemtap -darwin_native_packages = native_ds_store native_mac_alias +darwin_native_packages = ifneq ($(build_os),darwin) darwin_native_packages += native_cctools native_libtapi diff --git a/doc/build-osx.md b/doc/build-osx.md index 0bc06e7729764..15ab4b84c84a6 100644 --- a/doc/build-osx.md +++ b/doc/build-osx.md @@ -163,14 +163,8 @@ brew install python #### Deploy Dependencies -You can deploy a `.dmg` containing the Groestlcoin Core application using `make deploy`. -This command depends on a couple of python packages, so it is required that you have `python` installed. - -Ensuring that `python` is installed, you can install the deploy dependencies by running the following commands in your terminal: - -``` bash -pip3 install ds_store mac_alias -``` +You can deploy a `.zip` containing the Groestlcoin Core application using `make deploy`. +It is required that you have `python` installed. ## Building Groestlcoin Core @@ -230,7 +224,7 @@ make check # Run tests if Python 3 is available ### 3. Deploy (optional) -You can also create a `.dmg` containing the `.app` bundle by running the following command: +You can also create a `.zip` containing the `.app` bundle by running the following command: ``` bash make deploy diff --git a/doc/policy/packages.md b/doc/policy/packages.md index 09f8979cdfcdf..57bb9b3f925af 100644 --- a/doc/policy/packages.md +++ b/doc/policy/packages.md @@ -18,15 +18,18 @@ tip or some preceding transaction in the package. 
The following rules are enforced for all packages: -* Packages cannot exceed `MAX_PACKAGE_COUNT=25` count and `MAX_PACKAGE_SIZE=101KvB` total size. +* Packages cannot exceed `MAX_PACKAGE_COUNT=25` count and `MAX_PACKAGE_WEIGHT=404000` total weight - - *Rationale*: This is already enforced as mempool ancestor/descendant limits. If - transactions in a package are all related, exceeding this limit would mean that the package - can either be split up or it wouldn't pass individual mempool policy. + - *Rationale*: We want package size to be as small as possible to mitigate DoS via package + validation. However, we want to make sure that the limit does not restrict ancestor + packages that would be allowed if submitted individually. - Note that, if these mempool limits change, package limits should be reconsidered. Users may also configure their mempool limits differently. + - Note that the this is transaction weight, not "virtual" size as with other limits to allow + simpler context-less checks. + * Packages must be topologically sorted. * Packages cannot have conflicting transactions, i.e. no two transactions in a package can spend diff --git a/doc/release-process.md b/doc/release-process.md index 8fa6290580b9e..37dcd14ae38f6 100644 --- a/doc/release-process.md +++ b/doc/release-process.md @@ -90,7 +90,7 @@ git -C ./guix.sigs pull ### Create the macOS SDK tarball (first time, or when SDK version changes) Create the macOS SDK tarball, see the [macdeploy -instructions](/contrib/macdeploy/README.md#deterministic-macos-dmg-notes) for +instructions](/contrib/macdeploy/README.md#deterministic-macos-app-notes) for details. ### Build and attest to build outputs diff --git a/src/interfaces/wallet.h b/src/interfaces/wallet.h index 9987681367a3f..4b896c11a37e6 100644 --- a/src/interfaces/wallet.h +++ b/src/interfaces/wallet.h @@ -51,6 +51,7 @@ struct WalletBalances; struct WalletTx; struct WalletTxOut; struct WalletTxStatus; +struct WalletMigrationResult; using WalletOrderForm = std::vector>; using WalletValueMap = std::map; @@ -333,6 +334,9 @@ class WalletLoader : public ChainClient //! Restore backup wallet virtual util::Result> restoreWallet(const fs::path& backup_file, const std::string& wallet_name, std::vector& warnings) = 0; + //! Migrate a wallet + virtual util::Result migrateWallet(const std::string& name, const SecureString& passphrase) = 0; + //! Return available wallets in wallet directory. virtual std::vector listWalletDir() = 0; @@ -389,6 +393,7 @@ struct WalletTx CTransactionRef tx; std::vector txin_is_mine; std::vector txout_is_mine; + std::vector txout_is_change; std::vector txout_address; std::vector txout_address_is_mine; CAmount credit; @@ -424,6 +429,15 @@ struct WalletTxOut bool is_spent = false; }; +//! Migrated wallet info +struct WalletMigrationResult +{ + std::unique_ptr wallet; + std::optional watchonly_wallet_name; + std::optional solvables_wallet_name; + fs::path backup_path; +}; + //! Return implementation of Wallet interface. This function is defined in //! dummywallet.cpp and throws if the wallet component is not compiled. 
std::unique_ptr MakeWallet(wallet::WalletContext& context, const std::shared_ptr& wallet); diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index 836664edcb007..c58586528f58f 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -160,7 +160,6 @@ class CMainParams : public CChainParams { vFixedSeeds = std::vector(std::begin(chainparams_seed_main), std::end(chainparams_seed_main)); fDefaultConsistencyChecks = false; - m_is_test_chain = false; m_is_mockable_chain = false; checkpointData = { @@ -257,7 +256,6 @@ class CTestNetParams : public CChainParams { vFixedSeeds = std::vector(std::begin(chainparams_seed_test), std::end(chainparams_seed_test)); fDefaultConsistencyChecks = false; - m_is_test_chain = true; m_is_mockable_chain = false; checkpointData = { @@ -379,7 +377,6 @@ class SigNetParams : public CChainParams { bech32_hrp = "tgrs"; fDefaultConsistencyChecks = false; - m_is_test_chain = true; m_is_mockable_chain = false; } }; @@ -470,7 +467,6 @@ class CRegTestParams : public CChainParams vSeeds.emplace_back("dummySeed.invalid."); fDefaultConsistencyChecks = true; - m_is_test_chain = true; m_is_mockable_chain = true; checkpointData = { diff --git a/src/kernel/chainparams.h b/src/kernel/chainparams.h index 63837bb23edea..ec1697493c96e 100644 --- a/src/kernel/chainparams.h +++ b/src/kernel/chainparams.h @@ -93,7 +93,7 @@ class CChainParams /** Default value for -checkmempool and -checkblockindex argument */ bool DefaultConsistencyChecks() const { return fDefaultConsistencyChecks; } /** If this chain is exclusively used for testing */ - bool IsTestChain() const { return m_is_test_chain; } + bool IsTestChain() const { return m_chain_type != ChainType::MAIN; } /** If this chain allows time to be mocked */ bool IsMockableChain() const { return m_is_mockable_chain; } uint64_t PruneAfterHeight() const { return nPruneAfterHeight; } @@ -167,7 +167,6 @@ class CChainParams CBlock genesis; std::vector vFixedSeeds; bool fDefaultConsistencyChecks; - bool m_is_test_chain; bool m_is_mockable_chain; CCheckpointData checkpointData; MapAssumeutxo m_assumeutxo_data; diff --git a/src/net.cpp b/src/net.cpp index 5711f1582a64b..73673daf53eec 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -159,39 +159,34 @@ uint16_t GetListenPort() return static_cast(gArgs.GetIntArg("-port", Params().GetDefaultPort())); } -// find 'best' local address for a particular peer -bool GetLocal(CService& addr, const CNode& peer) +// Determine the "best" local address for a particular peer. +[[nodiscard]] static std::optional GetLocal(const CNode& peer) { - if (!fListen) - return false; + if (!fListen) return std::nullopt; + std::optional addr; int nBestScore = -1; int nBestReachability = -1; { LOCK(g_maplocalhost_mutex); - for (const auto& entry : mapLocalHost) - { + for (const auto& [local_addr, local_service_info] : mapLocalHost) { // For privacy reasons, don't advertise our privacy-network address // to other networks and don't advertise our other-network address // to privacy networks. 
- const Network our_net{entry.first.GetNetwork()}; - const Network peers_net{peer.ConnectedThroughNetwork()}; - if (our_net != peers_net && - (our_net == NET_ONION || our_net == NET_I2P || - peers_net == NET_ONION || peers_net == NET_I2P)) { + if (local_addr.GetNetwork() != peer.ConnectedThroughNetwork() + && (local_addr.IsPrivacyNet() || peer.IsConnectedThroughPrivacyNet())) { continue; } - int nScore = entry.second.nScore; - int nReachability = entry.first.GetReachabilityFrom(peer.addr); - if (nReachability > nBestReachability || (nReachability == nBestReachability && nScore > nBestScore)) - { - addr = CService(entry.first, entry.second.nPort); + const int nScore{local_service_info.nScore}; + const int nReachability{local_addr.GetReachabilityFrom(peer.addr)}; + if (nReachability > nBestReachability || (nReachability == nBestReachability && nScore > nBestScore)) { + addr.emplace(CService{local_addr, local_service_info.nPort}); nBestReachability = nReachability; nBestScore = nScore; } } } - return nBestScore >= 0; + return addr; } //! Convert the serialized seeds into usable address objects. @@ -217,17 +212,13 @@ static std::vector ConvertSeeds(const std::vector &vSeedsIn) return vSeedsOut; } -// get best local address for a particular peer as a CAddress -// Otherwise, return the unroutable 0.0.0.0 but filled in with +// Determine the "best" local address for a particular peer. +// If none, return the unroutable 0.0.0.0 but filled in with // the normal parameters, since the IP may be changed to a useful // one by discovery. CService GetLocalAddress(const CNode& peer) { - CService addr; - if (GetLocal(addr, peer)) { - return addr; - } - return CService{CNetAddr(), GetListenPort()}; + return GetLocal(peer).value_or(CService{CNetAddr(), GetListenPort()}); } static int GetnScore(const CService& addr) @@ -238,7 +229,7 @@ static int GetnScore(const CService& addr) } // Is our peer's addrLocal potentially useful as an external IP source? -bool IsPeerAddrLocalGood(CNode *pnode) +[[nodiscard]] static bool IsPeerAddrLocalGood(CNode *pnode) { CService addrLocal = pnode->GetAddrLocal(); return fDiscover && pnode->addr.IsRoutable() && addrLocal.IsRoutable() && @@ -290,7 +281,7 @@ std::optional GetLocalAddrForPeer(CNode& node) CService MaybeFlipIPv6toCJDNS(const CService& service) { CService ret{service}; - if (ret.m_net == NET_IPV6 && ret.m_addr[0] == 0xfc && IsReachable(NET_CJDNS)) { + if (ret.IsIPv6() && ret.HasCJDNSPrefix() && IsReachable(NET_CJDNS)) { ret.m_net = NET_CJDNS; } return ret; @@ -506,7 +497,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo const bool use_proxy{GetProxy(addrConnect.GetNetwork(), proxy)}; bool proxyConnectionFailed = false; - if (addrConnect.GetNetwork() == NET_I2P && use_proxy) { + if (addrConnect.IsI2P() && use_proxy) { i2p::Connection conn; if (m_i2p_sam_session) { @@ -638,6 +629,11 @@ Network CNode::ConnectedThroughNetwork() const return m_inbound_onion ? NET_ONION : addr.GetNetClass(); } +bool CNode::IsConnectedThroughPrivacyNet() const +{ + return m_inbound_onion || addr.IsPrivacyNet(); +} + #undef X #define X(name) stats.name = name void CNode::CopyStats(CNodeStats& stats) @@ -3754,10 +3750,11 @@ uint64_t CConnman::CalculateKeyedNetGroup(const CAddress& address) const return GetDeterministicRandomizer(RANDOMIZER_ID_NETGROUP).Write(vchNetGroup).Finalize(); } -void CaptureMessageToFile(const CAddress& addr, - const std::string& msg_type, - Span data, - bool is_incoming) +// Dump binary message to file, with timestamp. 
+static void CaptureMessageToFile(const CAddress& addr, + const std::string& msg_type, + Span data, + bool is_incoming) { // Note: This function captures the message at the time of processing, // not at socket receive/send time. diff --git a/src/net.h b/src/net.h index d161add578148..8a3a6c2779ce6 100644 --- a/src/net.h +++ b/src/net.h @@ -151,7 +151,6 @@ enum LOCAL_MAX }; -bool IsPeerAddrLocalGood(CNode *pnode); /** Returns a local address that we should advertise to this peer. */ std::optional GetLocalAddrForPeer(CNode& node); @@ -170,7 +169,6 @@ bool AddLocal(const CNetAddr& addr, int nScore = LOCAL_NONE); void RemoveLocal(const CService& addr); bool SeenLocal(const CService& addr); bool IsLocal(const CService& addr); -bool GetLocal(CService& addr, const CNode& peer); CService GetLocalAddress(const CNode& peer); CService MaybeFlipIPv6toCJDNS(const CService& service); @@ -835,6 +833,9 @@ class CNode */ Network ConnectedThroughNetwork() const; + /** Whether this peer connected through a privacy network. */ + [[nodiscard]] bool IsConnectedThroughPrivacyNet() const; + // We selected peer as (compact blocks) high-bandwidth peer (BIP152) std::atomic m_bip152_highbandwidth_to{false}; // Peer selected us as (compact blocks) high-bandwidth peer (BIP152) @@ -1576,12 +1577,6 @@ class CConnman friend struct ConnmanTestMsg; }; -/** Dump binary message to file, with timestamp */ -void CaptureMessageToFile(const CAddress& addr, - const std::string& msg_type, - Span data, - bool is_incoming); - /** Defaults to `CaptureMessageToFile()`, but can be overridden by unit tests. */ extern std::function{0x20, 0x01, 0x04, 0x70}); } -/** - * Check whether this object represents a TOR address. - * @see CNetAddr::SetSpecial(const std::string &) - */ -bool CNetAddr::IsTor() const { return m_net == NET_ONION; } - -/** - * Check whether this object represents an I2P address. - */ -bool CNetAddr::IsI2P() const { return m_net == NET_I2P; } - -/** - * Check whether this object represents a CJDNS address. 
- */ -bool CNetAddr::IsCJDNS() const { return m_net == NET_CJDNS; } - bool CNetAddr::IsLocal() const { // IPv4 loopback (127.0.0.0/8 or 0.0.0.0/8) @@ -450,8 +430,7 @@ bool CNetAddr::IsValid() const return false; } - // CJDNS addresses always start with 0xfc - if (IsCJDNS() && (m_addr[0] != 0xFC)) { + if (IsCJDNS() && !HasCJDNSPrefix()) { return false; } diff --git a/src/netaddress.h b/src/netaddress.h index 7d3659a11af55..ad09f167991b1 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -154,8 +154,8 @@ class CNetAddr bool SetSpecial(const std::string& addr); bool IsBindAny() const; // INADDR_ANY equivalent - bool IsIPv4() const; // IPv4 mapped address (::FFFF:0:0/96, 0.0.0.0/0) - bool IsIPv6() const; // IPv6 address (not mapped IPv4, not Tor) + [[nodiscard]] bool IsIPv4() const { return m_net == NET_IPV4; } // IPv4 mapped address (::FFFF:0:0/96, 0.0.0.0/0) + [[nodiscard]] bool IsIPv6() const { return m_net == NET_IPV6; } // IPv6 address (not mapped IPv4, not Tor) bool IsRFC1918() const; // IPv4 private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12) bool IsRFC2544() const; // IPv4 inter-network communications (198.18.0.0/15) bool IsRFC6598() const; // IPv4 ISP-level NAT (100.64.0.0/10) @@ -171,14 +171,22 @@ class CNetAddr bool IsRFC6052() const; // IPv6 well-known prefix for IPv4-embedded address (64:FF9B::/96) bool IsRFC6145() const; // IPv6 IPv4-translated address (::FFFF:0:0:0/96) (actually defined in RFC2765) bool IsHeNet() const; // IPv6 Hurricane Electric - https://he.net (2001:0470::/36) - bool IsTor() const; - bool IsI2P() const; - bool IsCJDNS() const; + [[nodiscard]] bool IsTor() const { return m_net == NET_ONION; } + [[nodiscard]] bool IsI2P() const { return m_net == NET_I2P; } + [[nodiscard]] bool IsCJDNS() const { return m_net == NET_CJDNS; } + [[nodiscard]] bool HasCJDNSPrefix() const { return m_addr[0] == 0xfc; } bool IsLocal() const; bool IsRoutable() const; bool IsInternal() const; bool IsValid() const; + /** + * Whether this object is a privacy network. + * TODO: consider adding IsCJDNS() here when more peers adopt CJDNS, see: + * https://github.com/bitcoin/bitcoin/pull/27411#issuecomment-1497176155 + */ + [[nodiscard]] bool IsPrivacyNet() const { return IsTor() || IsI2P(); } + /** * Check if the current object can be serialized in pre-ADDRv2/BIP155 format. */ diff --git a/src/policy/packages.cpp b/src/policy/packages.cpp index a901ef8f38c7a..fd272a2642e4c 100644 --- a/src/policy/packages.cpp +++ b/src/policy/packages.cpp @@ -23,10 +23,10 @@ bool CheckPackage(const Package& txns, PackageValidationState& state) return state.Invalid(PackageValidationResult::PCKG_POLICY, "package-too-many-transactions"); } - const int64_t total_size = std::accumulate(txns.cbegin(), txns.cend(), 0, - [](int64_t sum, const auto& tx) { return sum + GetVirtualTransactionSize(*tx); }); - // If the package only contains 1 tx, it's better to report the policy violation on individual tx size. - if (package_count > 1 && total_size > MAX_PACKAGE_SIZE * 1000) { + const int64_t total_weight = std::accumulate(txns.cbegin(), txns.cend(), 0, + [](int64_t sum, const auto& tx) { return sum + GetTransactionWeight(*tx); }); + // If the package only contains 1 tx, it's better to report the policy violation on individual tx weight. 
+ if (package_count > 1 && total_weight > MAX_PACKAGE_WEIGHT) { return state.Invalid(PackageValidationResult::PCKG_POLICY, "package-too-large"); } diff --git a/src/policy/packages.h b/src/policy/packages.h index 0a0e7cf6bb85e..702667b8ade8d 100644 --- a/src/policy/packages.h +++ b/src/policy/packages.h @@ -15,18 +15,22 @@ /** Default maximum number of transactions in a package. */ static constexpr uint32_t MAX_PACKAGE_COUNT{25}; -/** Default maximum total virtual size of transactions in a package in KvB. */ -static constexpr uint32_t MAX_PACKAGE_SIZE{101}; -static_assert(MAX_PACKAGE_SIZE * WITNESS_SCALE_FACTOR * 1000 >= MAX_STANDARD_TX_WEIGHT); +/** Default maximum total weight of transactions in a package in weight + to allow for context-less checks. This must allow a superset of sigops + weighted vsize limited transactions to not disallow transactions we would + have otherwise accepted individually. */ +static constexpr uint32_t MAX_PACKAGE_WEIGHT = 404'000; +static_assert(MAX_PACKAGE_WEIGHT >= MAX_STANDARD_TX_WEIGHT); -// If a package is submitted, it must be within the mempool's ancestor/descendant limits. Since a -// submitted package must be child-with-unconfirmed-parents (all of the transactions are an ancestor +// If a package is to be evaluated, it must be at least as large as the mempool's ancestor/descendant limits, +// otherwise transactions that would be individually accepted may be rejected in a package erroneously. +// Since a submitted package must be child-with-unconfirmed-parents (all of the transactions are an ancestor // of the child), package limits are ultimately bounded by mempool package limits. Ensure that the // defaults reflect this constraint. static_assert(DEFAULT_DESCENDANT_LIMIT >= MAX_PACKAGE_COUNT); static_assert(DEFAULT_ANCESTOR_LIMIT >= MAX_PACKAGE_COUNT); -static_assert(DEFAULT_ANCESTOR_SIZE_LIMIT_KVB >= MAX_PACKAGE_SIZE); -static_assert(DEFAULT_DESCENDANT_SIZE_LIMIT_KVB >= MAX_PACKAGE_SIZE); +static_assert(MAX_PACKAGE_WEIGHT >= DEFAULT_ANCESTOR_SIZE_LIMIT_KVB * WITNESS_SCALE_FACTOR * 1000); +static_assert(MAX_PACKAGE_WEIGHT >= DEFAULT_DESCENDANT_SIZE_LIMIT_KVB * WITNESS_SCALE_FACTOR * 1000); /** A "reason" why a package was invalid. It may be that one or more of the included * transactions is invalid or the package itself violates our rules. @@ -47,7 +51,7 @@ class PackageValidationState : public ValidationState { /** Context-free package policy checks: * 1. The number of transactions cannot exceed MAX_PACKAGE_COUNT. - * 2. The total virtual size cannot exceed MAX_PACKAGE_SIZE. + * 2. The total weight cannot exceed MAX_PACKAGE_WEIGHT. * 3. If any dependencies exist between transactions, parents must appear before children. * 4. Transactions cannot conflict, i.e., spend the same inputs. 
*/ diff --git a/src/qt/askpassphrasedialog.cpp b/src/qt/askpassphrasedialog.cpp index 0a96be038b22a..246dff0069215 100644 --- a/src/qt/askpassphrasedialog.cpp +++ b/src/qt/askpassphrasedialog.cpp @@ -167,6 +167,9 @@ void AskPassphraseDialog::accept() "passphrase to avoid this issue in the future.")); } } else { + if (m_passphrase_out) { + m_passphrase_out->assign(oldpass); + } QDialog::accept(); // Success } } catch (const std::runtime_error& e) { diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp index b84cd02bdade9..2862dddb56a40 100644 --- a/src/qt/bitcoingui.cpp +++ b/src/qt/bitcoingui.cpp @@ -359,6 +359,10 @@ void BitcoinGUI::createActions() m_close_all_wallets_action = new QAction(tr("Close All Wallets…"), this); m_close_all_wallets_action->setStatusTip(tr("Close all wallets")); + m_migrate_wallet_action = new QAction(tr("Migrate Wallet"), this); + m_migrate_wallet_action->setEnabled(false); + m_migrate_wallet_action->setStatusTip(tr("Migrate a wallet")); + showHelpMessageAction = new QAction(tr("&Command-line options"), this); showHelpMessageAction->setMenuRole(QAction::NoRole); showHelpMessageAction->setStatusTip(tr("Show the %1 help message to get a list with possible Bitcoin command-line options").arg(PACKAGE_NAME)); @@ -459,6 +463,11 @@ void BitcoinGUI::createActions() connect(m_close_all_wallets_action, &QAction::triggered, [this] { m_wallet_controller->closeAllWallets(this); }); + connect(m_migrate_wallet_action, &QAction::triggered, [this] { + auto activity = new MigrateWalletActivity(m_wallet_controller, this); + connect(activity, &MigrateWalletActivity::migrated, this, &BitcoinGUI::setCurrentWallet); + activity->migrate(walletFrame->currentWalletModel()); + }); connect(m_mask_values_action, &QAction::toggled, this, &BitcoinGUI::setPrivacy); connect(m_mask_values_action, &QAction::toggled, this, &BitcoinGUI::enableHistoryAction); } @@ -486,6 +495,7 @@ void BitcoinGUI::createMenuBar() file->addAction(m_open_wallet_action); file->addAction(m_close_wallet_action); file->addAction(m_close_all_wallets_action); + file->addAction(m_migrate_wallet_action); file->addSeparator(); file->addAction(backupWalletAction); file->addAction(m_restore_wallet_action); @@ -770,6 +780,7 @@ void BitcoinGUI::setCurrentWallet(WalletModel* wallet_model) } } updateWindowTitle(); + m_migrate_wallet_action->setEnabled(wallet_model->wallet().isLegacy()); } void BitcoinGUI::setCurrentWalletBySelectorIndex(int index) @@ -803,6 +814,7 @@ void BitcoinGUI::setWalletActionsEnabled(bool enabled) openAction->setEnabled(enabled); m_close_wallet_action->setEnabled(enabled); m_close_all_wallets_action->setEnabled(enabled); + m_migrate_wallet_action->setEnabled(enabled); } void BitcoinGUI::createTrayIcon() diff --git a/src/qt/bitcoingui.h b/src/qt/bitcoingui.h index 4e1f05255a724..510561454bcdb 100644 --- a/src/qt/bitcoingui.h +++ b/src/qt/bitcoingui.h @@ -163,6 +163,8 @@ class BitcoinGUI : public QMainWindow QAction* m_wallet_selector_label_action = nullptr; QAction* m_wallet_selector_action = nullptr; QAction* m_mask_values_action{nullptr}; + QAction* m_migrate_wallet_action{nullptr}; + QMenu* m_migrate_wallet_menu{nullptr}; QLabel *m_wallet_selector_label = nullptr; QComboBox* m_wallet_selector = nullptr; diff --git a/src/qt/createwalletdialog.cpp b/src/qt/createwalletdialog.cpp index 5b3c8bcf48124..3e8be3e6754dd 100644 --- a/src/qt/createwalletdialog.cpp +++ b/src/qt/createwalletdialog.cpp @@ -58,10 +58,7 @@ CreateWalletDialog::CreateWalletDialog(QWidget* parent) : 
ui->descriptor_checkbox->setChecked(checked); ui->encrypt_wallet_checkbox->setChecked(false); ui->disable_privkeys_checkbox->setChecked(checked); - // The blank check box is ambiguous. This flag is always true for a - // watch-only wallet, even though we immedidately fetch keys from the - // external signer. - ui->blank_wallet_checkbox->setChecked(checked); + ui->blank_wallet_checkbox->setChecked(false); }); connect(ui->disable_privkeys_checkbox, &QCheckBox::toggled, [this](bool checked) { @@ -69,9 +66,10 @@ CreateWalletDialog::CreateWalletDialog(QWidget* parent) : // set to true, enable it when isDisablePrivateKeysChecked is false. ui->encrypt_wallet_checkbox->setEnabled(!checked); - // Wallets without private keys start out blank + // Wallets without private keys cannot set blank + ui->blank_wallet_checkbox->setEnabled(!checked); if (checked) { - ui->blank_wallet_checkbox->setChecked(true); + ui->blank_wallet_checkbox->setChecked(false); } // When the encrypt_wallet_checkbox is disabled, uncheck it. @@ -81,8 +79,11 @@ CreateWalletDialog::CreateWalletDialog(QWidget* parent) : }); connect(ui->blank_wallet_checkbox, &QCheckBox::toggled, [this](bool checked) { - if (!checked) { - ui->disable_privkeys_checkbox->setChecked(false); + // Disable the disable_privkeys_checkbox when blank_wallet_checkbox is checked + // as blank-ness only pertains to wallets with private keys. + ui->disable_privkeys_checkbox->setEnabled(!checked); + if (checked) { + ui->disable_privkeys_checkbox->setChecked(false); } }); diff --git a/src/qt/transactionrecord.cpp b/src/qt/transactionrecord.cpp index 193bcb2d2686d..e7d518bf46c3b 100644 --- a/src/qt/transactionrecord.cpp +++ b/src/qt/transactionrecord.cpp @@ -13,6 +13,7 @@ #include +using wallet::ISMINE_NO; using wallet::ISMINE_SPENDABLE; using wallet::ISMINE_WATCH_ONLY; using wallet::isminetype; @@ -39,99 +40,52 @@ QList TransactionRecord::decomposeTransaction(const interface uint256 hash = wtx.tx->GetHash(); std::map mapValue = wtx.value_map; - if (nNet > 0 || wtx.is_coinbase) - { - // - // Credit - // - for(unsigned int i = 0; i < wtx.tx->vout.size(); i++) - { - const CTxOut& txout = wtx.tx->vout[i]; - isminetype mine = wtx.txout_is_mine[i]; - if(mine) - { - TransactionRecord sub(hash, nTime); - sub.idx = i; // vout index - sub.credit = txout.nValue; - sub.involvesWatchAddress = mine & ISMINE_WATCH_ONLY; - if (wtx.txout_address_is_mine[i]) - { - // Received by Groestlcoin Address - sub.type = TransactionRecord::RecvWithAddress; - sub.address = EncodeDestination(wtx.txout_address[i]); - } - else - { - // Received by IP connection (deprecated features), or a multisignature or other non-simple transaction - sub.type = TransactionRecord::RecvFromOther; - sub.address = mapValue["from"]; - } - if (wtx.is_coinbase) - { - // Generated - sub.type = TransactionRecord::Generated; - } - - parts.append(sub); - } - } - } - else - { - bool involvesWatchAddress = false; - isminetype fAllFromMe = ISMINE_SPENDABLE; + bool involvesWatchAddress = false; + isminetype fAllFromMe = ISMINE_SPENDABLE; + bool any_from_me = false; + if (wtx.is_coinbase) { + fAllFromMe = ISMINE_NO; + } else { for (const isminetype mine : wtx.txin_is_mine) { if(mine & ISMINE_WATCH_ONLY) involvesWatchAddress = true; if(fAllFromMe > mine) fAllFromMe = mine; + if (mine) any_from_me = true; } + } - isminetype fAllToMe = ISMINE_SPENDABLE; + if (fAllFromMe || !any_from_me) { for (const isminetype mine : wtx.txout_is_mine) { if(mine & ISMINE_WATCH_ONLY) involvesWatchAddress = true; - if(fAllToMe > mine) fAllToMe = 
mine; } - if (fAllFromMe && fAllToMe) - { - // Payment to self - std::string address; - for (auto it = wtx.txout_address.begin(); it != wtx.txout_address.end(); ++it) { - if (it != wtx.txout_address.begin()) address += ", "; - address += EncodeDestination(*it); - } + CAmount nTxFee = nDebit - wtx.tx->GetValueOut(); - CAmount nChange = wtx.change; - parts.append(TransactionRecord(hash, nTime, TransactionRecord::SendToSelf, address, -(nDebit - nChange), nCredit - nChange)); - parts.last().involvesWatchAddress = involvesWatchAddress; // maybe pass to TransactionRecord as constructor argument - } - else if (fAllFromMe) + for(unsigned int i = 0; i < wtx.tx->vout.size(); i++) { - // - // Debit - // - CAmount nTxFee = nDebit - wtx.tx->GetValueOut(); - - for (unsigned int nOut = 0; nOut < wtx.tx->vout.size(); nOut++) - { - const CTxOut& txout = wtx.tx->vout[nOut]; - TransactionRecord sub(hash, nTime); - sub.idx = nOut; - sub.involvesWatchAddress = involvesWatchAddress; + const CTxOut& txout = wtx.tx->vout[i]; - if(wtx.txout_is_mine[nOut]) - { - // Ignore parts sent to self, as this is usually the change - // from a transaction sent back to our own address. + if (fAllFromMe) { + // Change is only really possible if we're the sender + // Otherwise, someone just sent groestlcoins to a change address, which should be shown + if (wtx.txout_is_change[i]) { continue; } - if (!std::get_if(&wtx.txout_address[nOut])) + // + // Debit + // + + TransactionRecord sub(hash, nTime); + sub.idx = i; + sub.involvesWatchAddress = involvesWatchAddress; + + if (!std::get_if(&wtx.txout_address[i])) { // Sent to Groestlcoin Address sub.type = TransactionRecord::SendToAddress; - sub.address = EncodeDestination(wtx.txout_address[nOut]); + sub.address = EncodeDestination(wtx.txout_address[i]); } else { @@ -151,15 +105,45 @@ QList TransactionRecord::decomposeTransaction(const interface parts.append(sub); } + + isminetype mine = wtx.txout_is_mine[i]; + if(mine) + { + // + // Credit + // + + TransactionRecord sub(hash, nTime); + sub.idx = i; // vout index + sub.credit = txout.nValue; + sub.involvesWatchAddress = mine & ISMINE_WATCH_ONLY; + if (wtx.txout_address_is_mine[i]) + { + // Received by Groestlcoin Address + sub.type = TransactionRecord::RecvWithAddress; + sub.address = EncodeDestination(wtx.txout_address[i]); + } + else + { + // Received by IP connection (deprecated features), or a multisignature or other non-simple transaction + sub.type = TransactionRecord::RecvFromOther; + sub.address = mapValue["from"]; + } + if (wtx.is_coinbase) + { + // Generated + sub.type = TransactionRecord::Generated; + } + + parts.append(sub); + } } - else - { - // - // Mixed debit transaction, can't break down payees - // - parts.append(TransactionRecord(hash, nTime, TransactionRecord::Other, "", nNet, 0)); - parts.last().involvesWatchAddress = involvesWatchAddress; - } + } else { + // + // Mixed debit transaction, can't break down payees + // + parts.append(TransactionRecord(hash, nTime, TransactionRecord::Other, "", nNet, 0)); + parts.last().involvesWatchAddress = involvesWatchAddress; } return parts; @@ -170,11 +154,21 @@ void TransactionRecord::updateStatus(const interfaces::WalletTxStatus& wtx, cons // Determine transaction status // Sort order, unrecorded transactions sort to the top - status.sortKey = strprintf("%010d-%01d-%010u-%03d", + int typesort; + switch (type) { + case SendToAddress: case SendToOther: + typesort = 2; break; + case RecvWithAddress: case RecvFromOther: + typesort = 3; break; + default: + typesort = 9; + } + 
status.sortKey = strprintf("%010d-%01d-%010u-%03d-%d", wtx.block_height, wtx.is_coinbase ? 1 : 0, wtx.time_received, - idx); + idx, + typesort); status.countsForBalance = wtx.is_trusted && !(wtx.blocks_to_maturity > 0); status.depth = wtx.depth_in_main_chain; status.m_cur_block_hash = block_hash; diff --git a/src/qt/transactionrecord.h b/src/qt/transactionrecord.h index 36cfb422e8af1..21b1bc0e018c1 100644 --- a/src/qt/transactionrecord.h +++ b/src/qt/transactionrecord.h @@ -69,7 +69,6 @@ class TransactionRecord SendToOther, RecvWithAddress, RecvFromOther, - SendToSelf }; /** Number of confirmation recommended for accepting a transaction */ diff --git a/src/qt/transactiontablemodel.cpp b/src/qt/transactiontablemodel.cpp index 25d54bdce6644..486e51c7776f5 100644 --- a/src/qt/transactiontablemodel.cpp +++ b/src/qt/transactiontablemodel.cpp @@ -17,6 +17,7 @@ #include #include +#include #include #include @@ -377,8 +378,6 @@ QString TransactionTableModel::formatTxType(const TransactionRecord *wtx) const case TransactionRecord::SendToAddress: case TransactionRecord::SendToOther: return tr("Sent to"); - case TransactionRecord::SendToSelf: - return tr("Payment to yourself"); case TransactionRecord::Generated: return tr("Mined"); default: @@ -421,8 +420,6 @@ QString TransactionTableModel::formatTxToAddress(const TransactionRecord *wtx, b return lookupAddress(wtx->address, tooltip) + watchAddress; case TransactionRecord::SendToOther: return QString::fromStdString(wtx->address) + watchAddress; - case TransactionRecord::SendToSelf: - return lookupAddress(wtx->address, tooltip) + watchAddress; default: return tr("(n/a)") + watchAddress; } @@ -441,8 +438,6 @@ QVariant TransactionTableModel::addressColor(const TransactionRecord *wtx) const if(label.isEmpty()) return COLOR_BAREADDRESS; } break; - case TransactionRecord::SendToSelf: - return COLOR_BAREADDRESS; default: break; } @@ -560,7 +555,7 @@ QVariant TransactionTableModel::data(const QModelIndex &index, int role) const case Status: return QString::fromStdString(rec->status.sortKey); case Date: - return rec->time; + return QString::fromStdString(strprintf("%020s-%s", rec->time, rec->status.sortKey)); case Type: return formatTxType(rec); case Watchonly: diff --git a/src/qt/transactionview.cpp b/src/qt/transactionview.cpp index 351305f3fabb1..67af62285de45 100644 --- a/src/qt/transactionview.cpp +++ b/src/qt/transactionview.cpp @@ -91,7 +91,6 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *pa TransactionFilterProxy::TYPE(TransactionRecord::RecvFromOther)); typeWidget->addItem(tr("Sent to"), TransactionFilterProxy::TYPE(TransactionRecord::SendToAddress) | TransactionFilterProxy::TYPE(TransactionRecord::SendToOther)); - typeWidget->addItem(tr("To yourself"), TransactionFilterProxy::TYPE(TransactionRecord::SendToSelf)); typeWidget->addItem(tr("Mined"), TransactionFilterProxy::TYPE(TransactionRecord::Generated)); typeWidget->addItem(tr("Other"), TransactionFilterProxy::TYPE(TransactionRecord::Other)); diff --git a/src/qt/walletcontroller.cpp b/src/qt/walletcontroller.cpp index 8c8abf0e90124..ca2fa2d672f1f 100644 --- a/src/qt/walletcontroller.cpp +++ b/src/qt/walletcontroller.cpp @@ -435,3 +435,67 @@ void RestoreWalletActivity::finish() Q_EMIT finished(); } + +void MigrateWalletActivity::migrate(WalletModel* wallet_model) +{ + // Warn the user about migration + QMessageBox box(m_parent_widget); + box.setWindowTitle(tr("Migrate wallet")); + box.setText(tr("Are you sure you wish to migrate the wallet 
%1?").arg(GUIUtil::HtmlEscape(wallet_model->getDisplayName()))); + box.setInformativeText(tr("Migrating the wallet will convert this wallet to one or more descriptor wallets. A new wallet backup will need to be made.\n" + "If this wallet contains any watchonly scripts, a new wallet will be created which contains those watchonly scripts.\n" + "If this wallet contains any solvable but not watched scripts, a different and new wallet will be created which contains those scripts.\n\n" + "The migration process will create a backup of the wallet before migrating. This backup file will be named " + "-.legacy.bak and can be found in the directory for this wallet. In the event of " + "an incorrect migration, the backup can be restored with the \"Restore Wallet\" functionality.")); + box.setStandardButtons(QMessageBox::Yes|QMessageBox::Cancel); + box.setDefaultButton(QMessageBox::Yes); + if (box.exec() != QMessageBox::Yes) return; + + // Get the passphrase if it is encrypted regardless of it is locked or unlocked. We need the passphrase itself. + SecureString passphrase; + WalletModel::EncryptionStatus enc_status = wallet_model->getEncryptionStatus(); + if (enc_status == WalletModel::EncryptionStatus::Locked || enc_status == WalletModel::EncryptionStatus::Unlocked) { + AskPassphraseDialog dlg(AskPassphraseDialog::Unlock, m_parent_widget, &passphrase); + dlg.setModel(wallet_model); + dlg.exec(); + } + + // GUI needs to remove the wallet so that it can actually be unloaded by migration + const std::string name = wallet_model->wallet().getWalletName(); + m_wallet_controller->removeAndDeleteWallet(wallet_model); + + showProgressDialog(tr("Migrate Wallet"), tr("Migrating Wallet %1…").arg(GUIUtil::HtmlEscape(name))); + + QTimer::singleShot(0, worker(), [this, name, passphrase] { + auto res{node().walletLoader().migrateWallet(name, passphrase)}; + + if (res) { + m_success_message = tr("The wallet '%1' was migrated successfully.").arg(GUIUtil::HtmlEscape(res->wallet->getWalletName())); + if (res->watchonly_wallet_name) { + m_success_message += tr(" Watchonly scripts have been migrated to a new wallet named '%1'.").arg(GUIUtil::HtmlEscape(res->watchonly_wallet_name.value())); + } + if (res->solvables_wallet_name) { + m_success_message += tr(" Solvable but not watched scripts have been migrated to a new wallet named '%1'.").arg(GUIUtil::HtmlEscape(res->solvables_wallet_name.value())); + } + m_wallet_model = m_wallet_controller->getOrCreateWallet(std::move(res->wallet)); + } else { + m_error_message = util::ErrorString(res); + } + + QTimer::singleShot(0, this, &MigrateWalletActivity::finish); + }); +} + +void MigrateWalletActivity::finish() +{ + if (!m_error_message.empty()) { + QMessageBox::critical(m_parent_widget, tr("Migration failed"), QString::fromStdString(m_error_message.translated)); + } else { + QMessageBox::information(m_parent_widget, tr("Migration Successful"), m_success_message); + } + + if (m_wallet_model) Q_EMIT migrated(m_wallet_model); + + Q_EMIT finished(); +} diff --git a/src/qt/walletcontroller.h b/src/qt/walletcontroller.h index 8ec02438905e2..c595ba998db78 100644 --- a/src/qt/walletcontroller.h +++ b/src/qt/walletcontroller.h @@ -40,6 +40,7 @@ class path; class AskPassphraseDialog; class CreateWalletActivity; class CreateWalletDialog; +class MigrateWalletActivity; class OpenWalletActivity; class WalletControllerActivity; @@ -65,6 +66,8 @@ class WalletController : public QObject void closeWallet(WalletModel* wallet_model, QWidget* parent = nullptr); void closeAllWallets(QWidget* parent = 
nullptr); + void migrateWallet(WalletModel* wallet_model, QWidget* parent = nullptr); + Q_SIGNALS: void walletAdded(WalletModel* wallet_model); void walletRemoved(WalletModel* wallet_model); @@ -83,6 +86,7 @@ class WalletController : public QObject std::unique_ptr m_handler_load_wallet; friend class WalletControllerActivity; + friend class MigrateWalletActivity; }; class WalletControllerActivity : public QObject @@ -175,4 +179,22 @@ class RestoreWalletActivity : public WalletControllerActivity void finish(); }; +class MigrateWalletActivity : public WalletControllerActivity +{ + Q_OBJECT + +public: + MigrateWalletActivity(WalletController* wallet_controller, QWidget* parent) : WalletControllerActivity(wallet_controller, parent) {} + + void migrate(WalletModel* wallet_model); + +Q_SIGNALS: + void migrated(WalletModel* wallet_model); + +private: + QString m_success_message; + + void finish(); +}; + #endif // BITCOIN_QT_WALLETCONTROLLER_H diff --git a/src/qt/winshutdownmonitor.cpp b/src/qt/winshutdownmonitor.cpp index 386d593eeaab0..97a9ec318c4f5 100644 --- a/src/qt/winshutdownmonitor.cpp +++ b/src/qt/winshutdownmonitor.cpp @@ -43,7 +43,7 @@ bool WinShutdownMonitor::nativeEventFilter(const QByteArray &eventType, void *pM void WinShutdownMonitor::registerShutdownBlockReason(const QString& strReason, const HWND& mainWinId) { typedef BOOL (WINAPI *PSHUTDOWNBRCREATE)(HWND, LPCWSTR); - PSHUTDOWNBRCREATE shutdownBRCreate = (PSHUTDOWNBRCREATE)GetProcAddress(GetModuleHandleA("User32.dll"), "ShutdownBlockReasonCreate"); + PSHUTDOWNBRCREATE shutdownBRCreate = (PSHUTDOWNBRCREATE)(void*)GetProcAddress(GetModuleHandleA("User32.dll"), "ShutdownBlockReasonCreate"); if (shutdownBRCreate == nullptr) { qWarning() << "registerShutdownBlockReason: GetProcAddress for ShutdownBlockReasonCreate failed"; return; diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp index 377e9de0e84b3..705608bd476a3 100644 --- a/src/rpc/mempool.cpp +++ b/src/rpc/mempool.cpp @@ -862,7 +862,7 @@ static RPCHelpMan submitpackage() }, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { - if (!Params().IsMockableChain()) { + if (Params().GetChainType() != ChainType::REGTEST) { throw std::runtime_error("submitpackage is for regression testing (-regtest mode) only"); } const UniValue raw_transactions = request.params[0].get_array(); diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 605e318bcfbd1..0ccf81186c4c0 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -287,7 +287,7 @@ static RPCHelpMan addnode() strprintf("Addnode connections are limited to %u at a time", MAX_ADDNODE_CONNECTIONS) + " and are counted separately from the -maxconnections limit.\n", { - {"node", RPCArg::Type::STR, RPCArg::Optional::NO, "The node (see getpeerinfo for nodes)"}, + {"node", RPCArg::Type::STR, RPCArg::Optional::NO, "The address of the peer to connect to"}, {"command", RPCArg::Type::STR, RPCArg::Optional::NO, "'add' to add a node to the list, 'remove' to remove a node from the list, 'onetry' to try a connection to the node once"}, }, RPCResult{RPCResult::Type::NONE, "", ""}, @@ -297,10 +297,8 @@ static RPCHelpMan addnode() }, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { - std::string strCommand; - if (!request.params[1].isNull()) - strCommand = request.params[1].get_str(); - if (strCommand != "onetry" && strCommand != "add" && strCommand != "remove") { + const std::string command{request.params[1].get_str()}; + if (command != "onetry" && command != "add" && command != "remove") { throw 
std::runtime_error( self.ToString()); } @@ -308,24 +306,24 @@ static RPCHelpMan addnode() NodeContext& node = EnsureAnyNodeContext(request.context); CConnman& connman = EnsureConnman(node); - std::string strNode = request.params[0].get_str(); + const std::string node_arg{request.params[0].get_str()}; - if (strCommand == "onetry") + if (command == "onetry") { CAddress addr; - connman.OpenNetworkConnection(addr, false, nullptr, strNode.c_str(), ConnectionType::MANUAL); + connman.OpenNetworkConnection(addr, /*fCountFailure=*/false, /*grantOutbound=*/nullptr, node_arg.c_str(), ConnectionType::MANUAL); return UniValue::VNULL; } - if (strCommand == "add") + if (command == "add") { - if (!connman.AddNode(strNode)) { + if (!connman.AddNode(node_arg)) { throw JSONRPCError(RPC_CLIENT_NODE_ALREADY_ADDED, "Error: Node already added"); } } - else if(strCommand == "remove") + else if (command == "remove") { - if (!connman.RemoveAddedNode(strNode)) { + if (!connman.RemoveAddedNode(node_arg)) { throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED, "Error: Node could not be removed. It has not been added previously."); } } @@ -1016,6 +1014,55 @@ static RPCHelpMan sendmsgtopeer() }; } +static RPCHelpMan getaddrmaninfo() +{ + return RPCHelpMan{"getaddrmaninfo", + "\nProvides information about the node's address manager by returning the number of " + "addresses in the `new` and `tried` tables and their sum for all networks.\n" + "This RPC is for testing only.\n", + {}, + RPCResult{ + RPCResult::Type::OBJ_DYN, "", "json object with network type as keys", + { + {RPCResult::Type::OBJ, "network", "the network (" + Join(GetNetworkNames(), ", ") + ")", + { + {RPCResult::Type::NUM, "new", "number of addresses in the new table, which represent potential peers the node has discovered but hasn't yet successfully connected to."}, + {RPCResult::Type::NUM, "tried", "number of addresses in the tried table, which represent peers the node has successfully connected to in the past."}, + {RPCResult::Type::NUM, "total", "total number of addresses in both new/tried tables"}, + }}, + } + }, + RPCExamples{ + HelpExampleCli("getaddrmaninfo", "") + + HelpExampleRpc("getaddrmaninfo", "") + }, + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue + { + NodeContext& node = EnsureAnyNodeContext(request.context); + if (!node.addrman) { + throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Address manager functionality missing or disabled"); + } + + UniValue ret(UniValue::VOBJ); + for (int n = 0; n < NET_MAX; ++n) { + enum Network network = static_cast(n); + if (network == NET_UNROUTABLE || network == NET_INTERNAL) continue; + UniValue obj(UniValue::VOBJ); + obj.pushKV("new", node.addrman->Size(network, true)); + obj.pushKV("tried", node.addrman->Size(network, false)); + obj.pushKV("total", node.addrman->Size(network)); + ret.pushKV(GetNetworkName(network), obj); + } + UniValue obj(UniValue::VOBJ); + obj.pushKV("new", node.addrman->Size(std::nullopt, true)); + obj.pushKV("tried", node.addrman->Size(std::nullopt, false)); + obj.pushKV("total", node.addrman->Size()); + ret.pushKV("all_networks", obj); + return ret; + }, + }; +} + void RegisterNetRPCCommands(CRPCTable& t) { static const CRPCCommand commands[]{ @@ -1035,6 +1082,7 @@ void RegisterNetRPCCommands(CRPCTable& t) {"hidden", &addconnection}, {"hidden", &addpeeraddress}, {"hidden", &sendmsgtopeer}, + {"hidden", &getaddrmaninfo}, }; for (const auto& c : commands) { t.appendCommand(c.name, &c); diff --git a/src/test/fuzz/deserialize.cpp b/src/test/fuzz/deserialize.cpp index 
100a6b4ee45fb..510ee7fb5b06a 100644 --- a/src/test/fuzz/deserialize.cpp +++ b/src/test/fuzz/deserialize.cpp @@ -91,9 +91,9 @@ void DeserializeFromFuzzingInput(FuzzBufferType buffer, T&& obj, const P& params } template -CDataStream Serialize(const T& obj, const int version = INIT_PROTO_VERSION, const int ser_type = SER_NETWORK) +CDataStream Serialize(const T& obj) { - CDataStream ds(ser_type, version); + CDataStream ds{SER_NETWORK, INIT_PROTO_VERSION}; ds << obj; return ds; } @@ -107,12 +107,10 @@ T Deserialize(CDataStream ds) } template -void DeserializeFromFuzzingInput(FuzzBufferType buffer, T&& obj, const std::optional protocol_version = std::nullopt, const int ser_type = SER_NETWORK) +void DeserializeFromFuzzingInput(FuzzBufferType buffer, T&& obj) { - CDataStream ds(buffer, ser_type, INIT_PROTO_VERSION); - if (protocol_version) { - ds.SetVersion(*protocol_version); - } else { + CDataStream ds{buffer, SER_NETWORK, INIT_PROTO_VERSION}; + { try { int version; ds >> version; @@ -135,9 +133,9 @@ void AssertEqualAfterSerializeDeserialize(const T& obj, const P& params) assert(Deserialize(Serialize(obj, params), params) == obj); } template -void AssertEqualAfterSerializeDeserialize(const T& obj, const int version = INIT_PROTO_VERSION, const int ser_type = SER_NETWORK) +void AssertEqualAfterSerializeDeserialize(const T& obj) { - assert(Deserialize(Serialize(obj, version, ser_type)) == obj); + assert(Deserialize(Serialize(obj)) == obj); } } // namespace @@ -254,7 +252,7 @@ FUZZ_TARGET(netaddr_deserialize, .init = initialize_deserialize) if (!maybe_na) return; const CNetAddr& na{*maybe_na}; if (na.IsAddrV1Compatible()) { - AssertEqualAfterSerializeDeserialize(na, ConsumeDeserializationParams(fdp)); + AssertEqualAfterSerializeDeserialize(na, CNetAddr::V1); } AssertEqualAfterSerializeDeserialize(na, CNetAddr::V2); } @@ -266,7 +264,7 @@ FUZZ_TARGET(service_deserialize, .init = initialize_deserialize) if (!maybe_s) return; const CService& s{*maybe_s}; if (s.IsAddrV1Compatible()) { - AssertEqualAfterSerializeDeserialize(s, ConsumeDeserializationParams(fdp)); + AssertEqualAfterSerializeDeserialize(s, CNetAddr::V1); } AssertEqualAfterSerializeDeserialize(s, CNetAddr::V2); if (ser_params.enc == CNetAddr::Encoding::V1) { @@ -281,8 +279,8 @@ FUZZ_TARGET_DESERIALIZE(messageheader_deserialize, { FUZZ_TARGET(address_deserialize, .init = initialize_deserialize) { FuzzedDataProvider fdp{buffer.data(), buffer.size()}; - const auto ser_enc{ConsumeDeserializationParams(fdp)}; - const auto maybe_a{ConsumeDeserializable(fdp, CAddress::SerParams{{ser_enc}, CAddress::Format::Network})}; + const auto ser_enc{ConsumeDeserializationParams(fdp)}; + const auto maybe_a{ConsumeDeserializable(fdp, ser_enc)}; if (!maybe_a) return; const CAddress& a{*maybe_a}; // A CAddress in V1 mode will roundtrip diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp index 74f06b481a9bb..7e9a18e1d0bbe 100644 --- a/src/test/fuzz/rpc.cpp +++ b/src/test/fuzz/rpc.cpp @@ -110,6 +110,7 @@ const std::vector RPC_COMMANDS_SAFE_FOR_FUZZING{ "generate", "generateblock", "getaddednodeinfo", + "getaddrmaninfo", "getbestblockhash", "getblock", "getblockchaininfo", diff --git a/src/test/txpackage_tests.cpp b/src/test/txpackage_tests.cpp index 10ab656d38821..571b58156f737 100644 --- a/src/test/txpackage_tests.cpp +++ b/src/test/txpackage_tests.cpp @@ -51,14 +51,14 @@ BOOST_FIXTURE_TEST_CASE(package_sanitization_tests, TestChain100Setup) BOOST_CHECK_EQUAL(state_too_many.GetResult(), PackageValidationResult::PCKG_POLICY); 
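Illustrative sketch (not part of this patch): the hidden getaddrmaninfo RPC registered earlier in the diff, and added to RPC_COMMANDS_SAFE_FOR_FUZZING above, reports the new/tried table sizes per network plus an "all_networks" aggregate. A minimal way a functional test could consume it, assuming `node` is a running test-framework TestNode; the only invariant asserted follows directly from the documented result fields.

# Sketch only: `node` is assumed to be a BitcoinTestFramework TestNode.
info = node.getaddrmaninfo()
for net in ("ipv4", "ipv6", "onion", "i2p", "cjdns"):
    counts = info[net]
    # "total" is documented as the sum of the new and tried tables for that network
    assert counts["total"] == counts["new"] + counts["tried"]
# The aggregate covers every address in addrman, so it is at least the sum of the listed networks.
assert info["all_networks"]["total"] >= sum(info[n]["total"] for n in info if n != "all_networks")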
BOOST_CHECK_EQUAL(state_too_many.GetRejectReason(), "package-too-many-transactions"); - // Packages can't have a total size of more than 101KvB. + // Packages can't have a total weight of more than 404'000WU. CTransactionRef large_ptx = create_placeholder_tx(150, 150); Package package_too_large; - auto size_large = GetVirtualTransactionSize(*large_ptx); - size_t total_size{0}; - while (total_size <= MAX_PACKAGE_SIZE * 1000) { + auto size_large = GetTransactionWeight(*large_ptx); + size_t total_weight{0}; + while (total_weight <= MAX_PACKAGE_WEIGHT) { package_too_large.push_back(large_ptx); - total_size += size_large; + total_weight += size_large; } BOOST_CHECK(package_too_large.size() <= MAX_PACKAGE_COUNT); PackageValidationState state_too_large; @@ -122,7 +122,7 @@ BOOST_FIXTURE_TEST_CASE(package_validation_tests, TestChain100Setup) // A single, giant transaction submitted through ProcessNewPackage fails on single tx policy. CTransactionRef giant_ptx = create_placeholder_tx(999, 999); - BOOST_CHECK(GetVirtualTransactionSize(*giant_ptx) > MAX_PACKAGE_SIZE * 1000); + BOOST_CHECK(GetVirtualTransactionSize(*giant_ptx) > DEFAULT_ANCESTOR_SIZE_LIMIT_KVB * 1000); auto result_single_large = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, {giant_ptx}, /*test_accept=*/true); BOOST_CHECK(result_single_large.m_state.IsInvalid()); BOOST_CHECK_EQUAL(result_single_large.m_state.GetResult(), PackageValidationResult::PCKG_TX); diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 92379484e3a46..e021cfb06e376 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -197,19 +197,34 @@ util::Result CTxMemPool::CalculateAncestorsAndCheckLimit } bool CTxMemPool::CheckPackageLimits(const Package& package, - const Limits& limits, + const int64_t total_vsize, std::string &errString) const { + size_t pack_count = package.size(); + + // Package itself is busting mempool limits; should be rejected even if no staged_ancestors exist + if (pack_count > static_cast(m_limits.ancestor_count)) { + errString = strprintf("package count %u exceeds ancestor count limit [limit: %u]", pack_count, m_limits.ancestor_count); + return false; + } else if (pack_count > static_cast(m_limits.descendant_count)) { + errString = strprintf("package count %u exceeds descendant count limit [limit: %u]", pack_count, m_limits.descendant_count); + return false; + } else if (total_vsize > m_limits.ancestor_size_vbytes) { + errString = strprintf("package size %u exceeds ancestor size limit [limit: %u]", total_vsize, m_limits.ancestor_size_vbytes); + return false; + } else if (total_vsize > m_limits.descendant_size_vbytes) { + errString = strprintf("package size %u exceeds descendant size limit [limit: %u]", total_vsize, m_limits.descendant_size_vbytes); + return false; + } + CTxMemPoolEntry::Parents staged_ancestors; - int64_t total_size = 0; for (const auto& tx : package) { - total_size += GetVirtualTransactionSize(*tx); for (const auto& input : tx->vin) { std::optional piter = GetIter(input.prevout.hash); if (piter) { staged_ancestors.insert(**piter); - if (staged_ancestors.size() + package.size() > static_cast(limits.ancestor_count)) { - errString = strprintf("too many unconfirmed parents [limit: %u]", limits.ancestor_count); + if (staged_ancestors.size() + package.size() > static_cast(m_limits.ancestor_count)) { + errString = strprintf("too many unconfirmed parents [limit: %u]", m_limits.ancestor_count); return false; } } @@ -218,8 +233,8 @@ bool CTxMemPool::CheckPackageLimits(const Package& package, // When 
multiple transactions are passed in, the ancestors and descendants of all transactions // considered together must be within limits even if they are not interdependent. This may be // stricter than the limits for each individual transaction. - const auto ancestors{CalculateAncestorsAndCheckLimits(total_size, package.size(), - staged_ancestors, limits)}; + const auto ancestors{CalculateAncestorsAndCheckLimits(total_vsize, package.size(), + staged_ancestors, m_limits)}; // It's possible to overestimate the ancestor/descendant totals. if (!ancestors.has_value()) errString = "possibly " + util::ErrorString(ancestors).original; return ancestors.has_value(); diff --git a/src/txmempool.h b/src/txmempool.h index fcef19e8070fe..8ea3373cb47be 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -606,11 +606,11 @@ class CTxMemPool * @param[in] package Transaction package being evaluated for acceptance * to mempool. The transactions need not be direct * ancestors/descendants of each other. - * @param[in] limits Maximum number and size of ancestors and descendants + * @param[in] total_vsize Sum of virtual sizes for all transactions in package. * @param[out] errString Populated with error reason if a limit is hit. */ bool CheckPackageLimits(const Package& package, - const Limits& limits, + int64_t total_vsize, std::string &errString) const EXCLUSIVE_LOCKS_REQUIRED(cs); /** Populate setDescendants with all in-mempool descendants of hash. diff --git a/src/validation.cpp b/src/validation.cpp index cc7aab15c29b1..80b9d134555ec 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -433,8 +433,7 @@ class MemPoolAccept m_pool(mempool), m_view(&m_dummy), m_viewmempool(&active_chainstate.CoinsTip(), m_pool), - m_active_chainstate(active_chainstate), - m_limits{m_pool.m_limits} + m_active_chainstate(active_chainstate) { } @@ -635,6 +634,7 @@ class MemPoolAccept // Enforce package mempool ancestor/descendant limits (distinct from individual // ancestor/descendant limits done in PreChecks). bool PackageMempoolChecks(const std::vector& txns, + int64_t total_vsize, PackageValidationState& package_state) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); // Run the script checks using our policy flags. As this can be slow, we should @@ -684,8 +684,6 @@ class MemPoolAccept Chainstate& m_active_chainstate; - CTxMemPool::Limits m_limits; - /** Whether the transaction(s) would replace any mempool transactions. If so, RBF rules apply. */ bool m_rbf{false}; }; @@ -874,6 +872,11 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) if (!bypass_limits && !args.m_package_feerates && !CheckFeeRate(ws.m_vsize, ws.m_modified_fees, state)) return false; ws.m_iters_conflicting = m_pool.GetIterSet(ws.m_conflicts); + + // Note that these modifications are only applicable to single transaction scenarios; + // carve-outs and package RBF are disabled for multi-transaction evaluations. + CTxMemPool::Limits maybe_rbf_limits = m_pool.m_limits; + // Calculate in-mempool ancestors, up to a limit. 
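Illustrative sketch (not part of this patch): the reworked CTxMemPool::CheckPackageLimits above now rejects a package outright when its transaction count or summed virtual size already busts the ancestor/descendant limits, before any in-mempool ancestors are staged. A schematic Python rendering of those early checks; the numeric defaults (25 transactions, 101 kvB) are assumptions matching Bitcoin Core's default policy, not values read from the node.

# Schematic mirror of the stand-alone package checks; limits are assumed defaults.
DEFAULT_LIMITS = {
    "ancestor_count": 25,
    "ancestor_size_vbytes": 101_000,
    "descendant_count": 25,
    "descendant_size_vbytes": 101_000,
}

def package_precheck(pack_count, total_vsize, limits=DEFAULT_LIMITS):
    """Return an error string in the style of CheckPackageLimits, or None if the early checks pass."""
    if pack_count > limits["ancestor_count"]:
        return f"package count {pack_count} exceeds ancestor count limit [limit: {limits['ancestor_count']}]"
    if pack_count > limits["descendant_count"]:
        return f"package count {pack_count} exceeds descendant count limit [limit: {limits['descendant_count']}]"
    if total_vsize > limits["ancestor_size_vbytes"]:
        return f"package size {total_vsize} exceeds ancestor size limit [limit: {limits['ancestor_size_vbytes']}]"
    if total_vsize > limits["descendant_size_vbytes"]:
        return f"package size {total_vsize} exceeds descendant size limit [limit: {limits['descendant_size_vbytes']}]"
    return None  # the staged-ancestor accounting still runs after these early exits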
if (ws.m_conflicts.size() == 1) { // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we @@ -906,11 +909,11 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) assert(ws.m_iters_conflicting.size() == 1); CTxMemPool::txiter conflict = *ws.m_iters_conflicting.begin(); - m_limits.descendant_count += 1; - m_limits.descendant_size_vbytes += conflict->GetSizeWithDescendants(); + maybe_rbf_limits.descendant_count += 1; + maybe_rbf_limits.descendant_size_vbytes += conflict->GetSizeWithDescendants(); } - auto ancestors{m_pool.CalculateMemPoolAncestors(*entry, m_limits)}; + auto ancestors{m_pool.CalculateMemPoolAncestors(*entry, maybe_rbf_limits)}; if (!ancestors) { // If CalculateMemPoolAncestors fails second time, we want the original error string. // Contracting/payment channels CPFP carve-out: @@ -926,9 +929,9 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html CTxMemPool::Limits cpfp_carve_out_limits{ .ancestor_count = 2, - .ancestor_size_vbytes = m_limits.ancestor_size_vbytes, - .descendant_count = m_limits.descendant_count + 1, - .descendant_size_vbytes = m_limits.descendant_size_vbytes + EXTRA_DESCENDANT_TX_SIZE_LIMIT, + .ancestor_size_vbytes = maybe_rbf_limits.ancestor_size_vbytes, + .descendant_count = maybe_rbf_limits.descendant_count + 1, + .descendant_size_vbytes = maybe_rbf_limits.descendant_size_vbytes + EXTRA_DESCENDANT_TX_SIZE_LIMIT, }; const auto error_message{util::ErrorString(ancestors).original}; if (ws.m_vsize > EXTRA_DESCENDANT_TX_SIZE_LIMIT) { @@ -1001,6 +1004,7 @@ bool MemPoolAccept::ReplacementChecks(Workspace& ws) } bool MemPoolAccept::PackageMempoolChecks(const std::vector& txns, + const int64_t total_vsize, PackageValidationState& package_state) { AssertLockHeld(cs_main); @@ -1011,7 +1015,7 @@ bool MemPoolAccept::PackageMempoolChecks(const std::vector& txn { return !m_pool.exists(GenTxid::Txid(tx->GetHash()));})); std::string err_string; - if (!m_pool.CheckPackageLimits(txns, m_limits, err_string)) { + if (!m_pool.CheckPackageLimits(txns, total_vsize, err_string)) { // This is a package-wide error, separate from an individual transaction error. return package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-mempool-limits", err_string); } @@ -1166,7 +1170,7 @@ bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector& // Re-calculate mempool ancestors to call addUnchecked(). They may have changed since the // last calculation done in PreChecks, since package ancestors have already been submitted. { - auto ancestors{m_pool.CalculateMemPoolAncestors(*ws.m_entry, m_limits)}; + auto ancestors{m_pool.CalculateMemPoolAncestors(*ws.m_entry, m_pool.m_limits)}; if(!ancestors) { results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state)); // Since PreChecks() and PackageMempoolChecks() both enforce limits, this should never fail. @@ -1296,7 +1300,7 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std:: // because it's unnecessary. Also, CPFP carve out can increase the limit for individual // transactions, but this exemption is not extended to packages in CheckPackageLimits(). 
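Illustrative sketch (not part of this patch): the PreChecks change above copies m_pool.m_limits into a local maybe_rbf_limits so that the single-conflict RBF relaxation and the CPFP carve-out no longer leak into later package-level checks. A small sketch of the two adjustments; the 10,000 vB value for EXTRA_DESCENDANT_TX_SIZE_LIMIT is an assumption matching the constant's default.

# Sketch of the per-transaction limit relaxations; the 10_000 vB constant is an assumed default.
EXTRA_DESCENDANT_TX_SIZE_LIMIT = 10_000  # vB

def rbf_relaxed_limits(limits, conflict_size_with_descendants):
    # With exactly one direct conflict, the replacement may occupy the conflict's descendant slot and size.
    out = dict(limits)
    out["descendant_count"] += 1
    out["descendant_size_vbytes"] += conflict_size_with_descendants
    return out

def cpfp_carve_out_limits(limits):
    # Fallback limits tried only if the normal ancestor calculation fails, and only for small transactions.
    return {
        "ancestor_count": 2,
        "ancestor_size_vbytes": limits["ancestor_size_vbytes"],
        "descendant_count": limits["descendant_count"] + 1,
        "descendant_size_vbytes": limits["descendant_size_vbytes"] + EXTRA_DESCENDANT_TX_SIZE_LIMIT,
    }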
std::string err_string; - if (txns.size() > 1 && !PackageMempoolChecks(txns, package_state)) { + if (txns.size() > 1 && !PackageMempoolChecks(txns, m_total_vsize, package_state)) { return PackageMempoolAcceptResult(package_state, std::move(results)); } diff --git a/src/wallet/interfaces.cpp b/src/wallet/interfaces.cpp index 5f2aee6923e7f..65285187f4c57 100644 --- a/src/wallet/interfaces.cpp +++ b/src/wallet/interfaces.cpp @@ -41,6 +41,7 @@ using interfaces::Wallet; using interfaces::WalletAddress; using interfaces::WalletBalances; using interfaces::WalletLoader; +using interfaces::WalletMigrationResult; using interfaces::WalletOrderForm; using interfaces::WalletTx; using interfaces::WalletTxOut; @@ -66,6 +67,7 @@ WalletTx MakeWalletTx(CWallet& wallet, const CWalletTx& wtx) result.txout_address_is_mine.reserve(wtx.tx->vout.size()); for (const auto& txout : wtx.tx->vout) { result.txout_is_mine.emplace_back(wallet.IsMine(txout)); + result.txout_is_change.push_back(OutputIsChange(wallet, txout)); result.txout_address.emplace_back(); result.txout_address_is_mine.emplace_back(ExtractDestination(txout.scriptPubKey, result.txout_address.back()) ? wallet.IsMine(result.txout_address.back()) : @@ -630,6 +632,18 @@ class WalletLoaderImpl : public WalletLoader return util::Error{error}; } } + util::Result migrateWallet(const std::string& name, const SecureString& passphrase) override + { + auto res = wallet::MigrateLegacyToDescriptor(name, passphrase, m_context); + if (!res) return util::Error{util::ErrorString(res)}; + WalletMigrationResult out{ + .wallet = MakeWallet(m_context, res->wallet), + .watchonly_wallet_name = res->watchonly_wallet ? std::make_optional(res->watchonly_wallet->GetName()) : std::nullopt, + .solvables_wallet_name = res->solvables_wallet ? std::make_optional(res->solvables_wallet->GetName()) : std::nullopt, + .backup_path = res->backup_path, + }; + return {std::move(out)}; // std::move to work around clang bug + } std::string getWalletDir() override { return fs::PathToString(GetWalletDir()); diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 6c9714418db7a..692ea06f7c2c7 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4213,7 +4213,7 @@ util::Result MigrateLegacyToDescriptor(const std::string& walle // Migration successful, unload the wallet locally, then reload it. 
assert(local_wallet.use_count() == 1); local_wallet.reset(); - LoadWallet(context, wallet_name, /*load_on_start=*/std::nullopt, options, status, error, warnings); + res.wallet = LoadWallet(context, wallet_name, /*load_on_start=*/std::nullopt, options, status, error, warnings); res.wallet_name = wallet_name; } else { // Migration failed, cleanup diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index 6a788a609cc76..ce07d42d82df6 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -1087,6 +1087,7 @@ bool RemoveWalletSetting(interfaces::Chain& chain, const std::string& wallet_nam struct MigrationResult { std::string wallet_name; + std::shared_ptr wallet; std::shared_ptr watchonly_wallet; std::shared_ptr solvables_wallet; fs::path backup_path; diff --git a/test/functional/mempool_limit.py b/test/functional/mempool_limit.py index 0abebbec02583..a1147f70f33e1 100644 --- a/test/functional/mempool_limit.py +++ b/test/functional/mempool_limit.py @@ -78,6 +78,56 @@ def fill_mempool(self): assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000')) assert_greater_than(node.getmempoolinfo()['mempoolminfee'], Decimal('0.00001000')) + def test_rbf_carveout_disallowed(self): + node = self.nodes[0] + + self.log.info("Check that individually-evaluated transactions in a package don't increase package limits for other subpackage parts") + + # We set chain limits to 2 ancestors, 1 descendant, then try to get a parents-and-child chain of 2 in mempool + # + # A: Solo transaction to be RBF'd (to bump descendant limit for package later) + # B: First transaction in package, RBFs A by itself under individual evaluation, which would give it +1 descendant limit + # C: Second transaction in package, spends B. If the +1 descendant limit persisted, would make it into mempool + + self.restart_node(0, extra_args=self.extra_args[0] + ["-limitancestorcount=2", "-limitdescendantcount=1"]) + + # Generate a confirmed utxo we will double-spend + rbf_utxo = self.wallet.send_self_transfer( + from_node=node, + confirmed_only=True + )["new_utxo"] + self.generate(node, 1) + + # tx_A needs to be RBF'd, set minfee at set size + A_weight = 1000 + mempoolmin_feerate = node.getmempoolinfo()["mempoolminfee"] + tx_A = self.wallet.send_self_transfer( + from_node=node, + fee=(mempoolmin_feerate / 1000) * (A_weight // 4) + Decimal('0.000001'), + target_weight=A_weight, + utxo_to_spend=rbf_utxo, + confirmed_only=True + ) + + # RBF's tx_A, is not yet submitted + tx_B = self.wallet.create_self_transfer( + fee=tx_A["fee"] * 4, + target_weight=A_weight, + utxo_to_spend=rbf_utxo, + confirmed_only=True + ) + + # Spends tx_B's output, too big for cpfp carveout (because that would also increase the descendant limit by 1) + non_cpfp_carveout_weight = 40001 # EXTRA_DESCENDANT_TX_SIZE_LIMIT + 1 + tx_C = self.wallet.create_self_transfer( + target_weight=non_cpfp_carveout_weight, + fee = (mempoolmin_feerate / 1000) * (non_cpfp_carveout_weight // 4) + Decimal('0.000001'), + utxo_to_spend=tx_B["new_utxo"], + confirmed_only=True + ) + + assert_raises_rpc_error(-26, "too-long-mempool-chain", node.submitpackage, [tx_B["hex"], tx_C["hex"]]) + def test_mid_package_eviction(self): node = self.nodes[0] self.log.info("Check a package where each parent passes the current mempoolminfee but would cause eviction before package submission terminates") @@ -324,6 +374,7 @@ def run_test(self): self.test_mid_package_replacement() self.test_mid_package_eviction() + self.test_rbf_carveout_disallowed() if __name__ == '__main__': diff --git 
a/test/functional/mempool_sigoplimit.py b/test/functional/mempool_sigoplimit.py index 962b2b19bd8ef..fbec6d0dc8b7b 100755 --- a/test/functional/mempool_sigoplimit.py +++ b/test/functional/mempool_sigoplimit.py @@ -3,6 +3,7 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test sigop limit mempool policy (`-bytespersigop` parameter)""" +from decimal import Decimal from math import ceil from test_framework.messages import ( @@ -25,6 +26,7 @@ OP_TRUE, ) from test_framework.script_util import ( + keys_to_multisig_script, script_to_p2wsh_script, ) from test_framework.test_framework import BitcoinTestFramework @@ -32,9 +34,10 @@ assert_equal, assert_greater_than, assert_greater_than_or_equal, + assert_raises_rpc_error, ) from test_framework.wallet import MiniWallet - +from test_framework.wallet_util import generate_keypair DEFAULT_BYTES_PER_SIGOP = 20 # default setting @@ -133,6 +136,45 @@ def test_sigops_limit(self, bytes_per_sigop, num_sigops): assert_equal(entry_parent['descendantcount'], 2) assert_equal(entry_parent['descendantsize'], parent_tx.get_vsize() + sigop_equivalent_vsize) + def test_sigops_package(self): + self.log.info("Test a overly-large sigops-vbyte hits package limits") + # Make a 2-transaction package which fails vbyte checks even though + # separately they would work. + self.restart_node(0, extra_args=["-bytespersigop=5000"] + self.extra_args[0]) + + def create_bare_multisig_tx(utxo_to_spend=None): + _, pubkey = generate_keypair() + amount_for_bare = 50000 + tx_dict = self.wallet.create_self_transfer(fee=Decimal("3"), utxo_to_spend=utxo_to_spend) + tx_utxo = tx_dict["new_utxo"] + tx = tx_dict["tx"] + tx.vout.append(CTxOut(amount_for_bare, keys_to_multisig_script([pubkey], k=1))) + tx.vout[0].nValue -= amount_for_bare + tx_utxo["txid"] = tx.rehash() + tx_utxo["value"] -= Decimal("0.00005000") + return (tx_utxo, tx) + + tx_parent_utxo, tx_parent = create_bare_multisig_tx() + tx_child_utxo, tx_child = create_bare_multisig_tx(tx_parent_utxo) + + # Separately, the parent tx is ok + parent_individual_testres = self.nodes[0].testmempoolaccept([tx_parent.serialize().hex()])[0] + assert parent_individual_testres["allowed"] + # Multisig is counted as MAX_PUBKEYS_PER_MULTISIG = 20 sigops + assert_equal(parent_individual_testres["vsize"], 5000 * 20) + + # But together, it's exceeding limits in the *package* context. If sigops adjusted vsize wasn't being checked + # here, it would get further in validation and give too-long-mempool-chain error instead. 
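Illustrative sketch (not part of this patch): the arithmetic behind the package failure asserted below. With -bytespersigop=5000, each bare multisig output is counted at MAX_PUBKEYS_PER_MULTISIG sigops, so every transaction's sigop-adjusted vsize is 100,000 vB; one such transaction fits under the (assumed default) 101 kvB ancestor/descendant size limit, but the two-transaction package does not, even though the real weight is tiny.

# Arithmetic only; 101_000 vB is the assumed default ancestor/descendant size limit.
BYTES_PER_SIGOP = 5000           # set via -bytespersigop in the test above
MAX_PUBKEYS_PER_MULTISIG = 20    # bare CHECKMULTISIG is counted at the maximum
sigop_adjusted_vsize = BYTES_PER_SIGOP * MAX_PUBKEYS_PER_MULTISIG  # 100_000 vB per transaction
package_vsize = 2 * sigop_adjusted_vsize                            # 200_000 vB for parent + child
assert sigop_adjusted_vsize <= 101_000 < package_vsize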
+ packet_test = self.nodes[0].testmempoolaccept([tx_parent.serialize().hex(), tx_child.serialize().hex()]) + assert_equal([x["package-error"] for x in packet_test], ["package-mempool-limits", "package-mempool-limits"]) + + # When we actually try to submit, the parent makes it into the mempool, but the child would exceed ancestor vsize limits + assert_raises_rpc_error(-26, "too-long-mempool-chain", self.nodes[0].submitpackage, [tx_parent.serialize().hex(), tx_child.serialize().hex()]) + assert tx_parent.rehash() in self.nodes[0].getrawmempool() + + # Transactions are tiny in weight + assert_greater_than(2000, tx_parent.get_weight() + tx_child.get_weight()) + def run_test(self): self.wallet = MiniWallet(self.nodes[0]) @@ -149,6 +191,8 @@ def run_test(self): self.generate(self.wallet, 1) + self.test_sigops_package() + if __name__ == '__main__': BytesPerSigOpTest().main() diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index cfc177574fe93..b398ef51e1810 100644 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -70,9 +70,9 @@ SIGHASH_ANYONECANPAY, SIGHASH_NONE, SIGHASH_SINGLE, - SegwitV0SignatureHash, hash160, sign_input_legacy, + sign_input_segwitv0, ) from test_framework.script_util import ( key_to_p2pk_script, @@ -121,10 +121,8 @@ def func_wrapper(self, *args, **kwargs): def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key): """Add signature for a P2PK witness script.""" - tx_hash = SegwitV0SignatureHash(script, tx_to, in_idx, hashtype, value) - signature = key.sign_ecdsa(tx_hash) + chr(hashtype).encode('latin-1') - tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script] - tx_to.rehash() + tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [script] + sign_input_segwitv0(tx_to, in_idx, script, value, key, hashtype) def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None): """Send a transaction to the node and check that it's accepted to the mempool @@ -1476,11 +1474,9 @@ def test_uncompressed_pubkey(self): tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh)) script = keyhash_to_p2pkh_script(pubkeyhash) - sig_hash = SegwitV0SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue) - signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey] - tx2.rehash() + tx2.wit.vtxinwit[0].scriptWitness.stack = [pubkey] + sign_input_segwitv0(tx2, 0, script, tx.vout[0].nValue, key) # Should fail policy test. 
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') @@ -1676,11 +1672,13 @@ def test_signature_version_1(self): tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) script = keyhash_to_p2pkh_script(pubkeyhash) - sig_hash = SegwitV0SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue) - signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL + tx2.wit.vtxinwit.append(CTxInWitness()) + sign_input_segwitv0(tx2, 0, script, tx.vout[0].nValue, key) + signature = tx2.wit.vtxinwit[0].scriptWitness.stack.pop() # Check that we can't have a scriptSig tx2.vin[0].scriptSig = CScript([signature, pubkey]) + tx2.rehash() block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx, tx2]) test_witness_block(self.nodes[0], self.test_node, block, accepted=False, diff --git a/test/functional/rpc_estimatefee.py b/test/functional/rpc_estimatefee.py index dad3cbcf0cb2a..6643799a76700 100755 --- a/test/functional/rpc_estimatefee.py +++ b/test/functional/rpc_estimatefee.py @@ -36,6 +36,9 @@ def run_test(self): assert_raises_rpc_error(-1, "estimatesmartfee", self.nodes[0].estimatesmartfee, 1, 'ECONOMICAL', 1) assert_raises_rpc_error(-1, "estimaterawfee", self.nodes[0].estimaterawfee, 1, 1, 1) + # max value of 1008 per src/policy/fees.h + assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008", self.nodes[0].estimaterawfee, 1009) + # valid calls self.nodes[0].estimatesmartfee(1) # self.nodes[0].estimatesmartfee(1, None) diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index 255f5108a2556..117802b812ceb 100644 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -61,11 +61,12 @@ def run_test(self): self.test_getpeerinfo() self.test_getnettotals() self.test_getnetworkinfo() - self.test_getaddednodeinfo() + self.test_addnode_getaddednodeinfo() self.test_service_flags() self.test_getnodeaddresses() self.test_addpeeraddress() self.test_sendmsgtopeer() + self.test_getaddrmaninfo() def test_connection_count(self): self.log.info("Test getconnectioncount") @@ -204,8 +205,8 @@ def test_getnetworkinfo(self): # Check dynamically generated networks list in getnetworkinfo help output. 
assert "(ipv4, ipv6, onion, i2p, cjdns)" in self.nodes[0].help("getnetworkinfo") - def test_getaddednodeinfo(self): - self.log.info("Test getaddednodeinfo") + def test_addnode_getaddednodeinfo(self): + self.log.info("Test addnode and getaddednodeinfo") assert_equal(self.nodes[0].getaddednodeinfo(), []) # add a node (node2) to node0 ip_port = "127.0.0.1:{}".format(p2p_port(2)) @@ -219,6 +220,8 @@ def test_getaddednodeinfo(self): # check that node can be removed self.nodes[0].addnode(node=ip_port, command='remove') assert_equal(self.nodes[0].getaddednodeinfo(), []) + # check that an invalid command returns an error + assert_raises_rpc_error(-1, 'addnode "node" "command"', self.nodes[0].addnode, node=ip_port, command='abc') # check that trying to remove the node again returns an error assert_raises_rpc_error(-24, "Node could not be removed", self.nodes[0].addnode, node=ip_port, command='remove') # check that a non-existent node returns an error @@ -360,6 +363,28 @@ def test_sendmsgtopeer(self): node.sendmsgtopeer(peer_id=0, msg_type="addr", msg=zero_byte_string.hex()) self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0, timeout=10) + def test_getaddrmaninfo(self): + self.log.info("Test getaddrmaninfo") + node = self.nodes[1] + + self.log.debug("Test that getaddrmaninfo is a hidden RPC") + # It is hidden from general help, but its detailed help may be called directly. + assert "getaddrmaninfo" not in node.help() + assert "getaddrmaninfo" in node.help("getaddrmaninfo") + + # current count of ipv4 addresses in addrman is {'new':1, 'tried':1} + self.log.info("Test that count of addresses in addrman match expected values") + res = node.getaddrmaninfo() + assert_equal(res["ipv4"]["new"], 1) + assert_equal(res["ipv4"]["tried"], 1) + assert_equal(res["ipv4"]["total"], 2) + assert_equal(res["all_networks"]["new"], 1) + assert_equal(res["all_networks"]["tried"], 1) + assert_equal(res["all_networks"]["total"], 2) + for net in ["ipv6", "onion", "i2p", "cjdns"]: + assert_equal(res[net]["new"], 0) + assert_equal(res[net]["tried"], 0) + assert_equal(res[net]["total"], 0) if __name__ == '__main__': NetTest().main() diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index 047ba4206cdbc..0c07373171e56 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -699,6 +699,15 @@ def sign_input_legacy(tx, input_index, input_scriptpubkey, privkey, sighash_type tx.vin[input_index].scriptSig = bytes(CScript([der_sig + bytes([sighash_type])])) + tx.vin[input_index].scriptSig tx.rehash() +def sign_input_segwitv0(tx, input_index, input_scriptpubkey, input_amount, privkey, sighash_type=SIGHASH_ALL): + """Add segwitv0 ECDSA signature for a given transaction input. Note that the signature + is inserted at the bottom of the witness stack, i.e. additional witness data + needed (e.g. pubkey for P2WPKH) can already be set before.""" + sighash = SegwitV0SignatureHash(input_scriptpubkey, tx, input_index, sighash_type, input_amount) + der_sig = privkey.sign_ecdsa(sighash) + tx.wit.vtxinwit[input_index].scriptWitness.stack.insert(0, der_sig + bytes([sighash_type])) + tx.rehash() + # TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided. # Performance optimization probably not necessary for python tests, however. # Note that this corresponds to sigversion == 1 in EvalScript, which is used